master
{admin@attacker.club} 2024-07-21 01:29:01 +08:00
commit 8a204467fa
168 changed files with 7107 additions and 0 deletions

73
1.docs/1.1 docker.md Normal file
View File

@ -0,0 +1,73 @@
<!--
* @Author: admin@attacker.club
* @Date: 2022-09-14 21:38:54
* @LastEditTime: 2023-02-16 10:13:18
* @Description:
-->
# docker 部署
部署
- 在线 shell 安装
```bash
curl -sSL https://get.docker.com/ | sh
```
- yum
```bash
## 清理老的版本
sudo yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
## yum在线安装
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
# 添加Docker软件包源
sudo yum install docker-ce docker-ce-cli containerd.io
# 安装Docker CE
systemctl start docker && systemctl enable docker
# 启动服务
```
## docker 配置
```bash
cat > /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"graph": "/var/lib/docker",
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"registry-mirrors": [
"https://1lcdq5an.mirror.aliyuncs.com",
"https://mirror.ccs.tencentyun.com",
"http://hub-mirror.c.163.com"
]
}
EOF
# log日志保存大小设置为100M
# "graph": "/data/docker"指定docker默认数据路径
# "exec-opts": ["native.cgroupdriver=systemd"],
## 调整docker Cgroup Driver为systemd和日志格式设定
systemctl restart docker # 重启docker
```

View File

@ -0,0 +1,19 @@
<!--
* @Author: admin@attacker.club
* @Date: 2022-12-10 22:27:24
* @LastEditTime: 2023-12-14 17:12:13
* @Description:
-->
# 二进制 docker-compose
下载地址: https://github.com/docker/compose/releases
```bash
# wget -O /usr/local/sbin/docker-compose https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)
curl -L https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/sbin/docker-compose
chmod +x /usr/local/sbin/docker-compose
docker-compose version # 查看docker-compose版本
```

49
1.docs/2.1 harbor.md Normal file
View File

@ -0,0 +1,49 @@
<!--
* @Author: admin@attacker.club
* @Date: 2022-09-14 21:38:54
* @LastEditTime: 2024-03-05 22:26:51
* @Description:
-->
# harbor 镜像仓库
```bash
wget -c https://github.com/goharbor/harbor/releases/download/v2.3.4/harbor-offline-installer-v2.3.4.tgz
tar zxvf harbor-offline-installer*.tgz
cd harbor
cp harbor.yml.tmpl harbor.yml
grep hostname harbor.yml
# 修改 hostname地址http,https配置
./install.sh # 执行安装脚本
## compose启动
docker-compose down
docker-compose up -d
## 登录私有仓库 dockerhub
docker login
## 登录私有仓库
docker login harbor.opsbase.cn
```
## 更新证书
```bash
# 如果证书到期或更换,将新证书复制进 nginx 容器后重启服务
cd harbor/
docker cp harbor.enterx.cc.key nginx:/etc/cert/server.key
docker cp harbor.enterx.cc_bundle.pem nginx:/etc/cert/server.crt
docker-compose down && docker-compose up -d
```
## tag 使用
```bash
docker tag myblog:v1 harbor.opsbase.cn/public/myblog:v1
docker push harbor.opsbase.cn/public/myblog:v1
# 打tag 推送到harbor
docker pull harbor.opsbase.cn/public/myblog:v1
# 拉取镜像到本地
```

348
1.docs/kubernets-install.sh Normal file
View File

@ -0,0 +1,348 @@
#!/bin/bash
# Usage:
#   bash kubernets-install.sh master
#   bash kubernets-install.sh node
# Installs a Kubernetes cluster node.
# Version list: https://github.com/kubernetes/kubernetes/releases

# Default Kubernetes version; can be overridden interactively below.
default_version="1.27.4"
kubernetes_version="$default_version"
# BUG FIX: the answer used to be read into the same variable that held the
# default version, so declining the prompt left kubernetes_version empty.
read -p "是否修改Kubernetes版本? 当前为: $default_version (y/n) " answer || true
if [ "$answer" == "y" ]; then
    read -p "请输入自定义的Kubernetes版本: " custom_kubernetes_version
    kubernetes_version="$custom_kubernetes_version"
fi
echo "选择的Kubernetes版本是: $kubernetes_version"
# First positional argument selects the node role; default to "master".
node_type=${1:-"master"}
# Print a short usage banner before doing anything.
cat <<EOF
该脚本用于安装 Kubernetes 集群,并根据地区选择合适的镜像源。
请在运行脚本之前确认:
========================================
1. Master节点: ./kubernets-install.sh master
2. worker节点: ./kubernets-install.sh worker
3. 指定kubernetes安装版本;
4. 默认使用flannel网络组件,可注释并改为install_network_plugin_calico
========================================
EOF
# Abort unless the effective user is root; package installation and
# system configuration below all require root privileges.
check_root_user() {
    if (( EUID != 0 )); then
        echo "请使用 root 用户执行此脚本。"
        exit 1
    fi
}
# Heuristic geo check: ask a geo-IP service for the country code and
# succeed (return 0) when it reports mainland China.  Network I/O.
is_china() {
    local country
    country=$(curl -sSL https://ipapi.co/country/)
    [[ "$country" = "CN" ]]
}
# Pick download mirrors based on geography and export them as globals:
# docker_image_repository, yum_repository, apt_repository, flannel, calico.
# Mainland China gets Aliyun/Gitee mirrors; everywhere else the upstream
# defaults.  Note: the China calico value carries an extra CLI flag and is
# consumed unquoted later on purpose.
select_country() {
    if ! is_china; then
        echo "检测不在中国地区,将使用官方镜像源。"
        docker_image_repository="registry.k8s.io"
        yum_repository="https://packages.cloud.google.com"
        apt_repository="https://apt.kubernetes.io"
        flannel="https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml"
        calico="https://docs.projectcalico.org/v3.20/manifests/calico.yaml"
        return
    fi
    echo "检测在中国地区,将使用国内镜像源。"
    docker_image_repository="registry.aliyuncs.com/google_containers"
    yum_repository="https://mirrors.aliyun.com/kubernetes"
    apt_repository="https://mirrors.aliyun.com/kubernetes/apt"
    flannel="https://gitee.com/mirrors/flannel/raw/master/Documentation/kube-flannel.yml"
    calico="https://docs.projectcalico.org/v3.20/manifests/calico.yaml --image-repository=registry.cn-hangzhou.aliyuncs.com/calico"
}
# If kubeadm is already on PATH, offer to uninstall the existing
# Kubernetes before proceeding; declining aborts the whole install.
check_kubernetes_installed() {
  if command -v kubeadm >/dev/null 2>&1; then
    echo "已检测到已安装的 Kubernetes。"
    # Interactive confirmation; anything other than y/Y cancels.
    read -p "是否卸载已存在的 Kubernetes?(y/n): " uninstall_choice
    if [[ $uninstall_choice = "y" || $uninstall_choice = "Y" ]]; then
      uninstall_kubernetes # dispatches on $os (set by get_os_info)
    else
      echo "已取消安装。"
      exit 0 # user chose to keep the current install — stop here
    fi
  fi
}
# Remove an existing Kubernetes installation using the per-distro helper
# selected by $os; unknown distros abort the script.
uninstall_kubernetes() {
    echo "正在卸载 Kubernetes..."
    case $os in
    ubuntu)
        uninstall_kubernetes_ubuntu
        ;;
    centos | amazon_linux)
        # Amazon Linux is RPM-based, so it shares the CentOS path.
        uninstall_kubernetes_centos
        ;;
    *)
        echo "不支持的操作系统。"
        exit 1
        ;;
    esac
    echo "Kubernetes 已成功卸载。"
}
# Detect the distro and store it in the global $os as one of
# "ubuntu", "centos" or "amazon_linux"; prefers /etc/os-release and
# falls back to /etc/redhat-release for CentOS 7.
get_os_info() {
    if [ -f /etc/os-release ]; then
        . /etc/os-release # defines $ID among others
        case "$ID" in
        ubuntu) os="ubuntu" ;;
        centos) os="centos" ;;
        amzn) os="amazon_linux" ;;
        esac
    elif [ -f /etc/redhat-release ]; then
        if grep -q "CentOS Linux release 7" /etc/redhat-release; then
            os="centos"
        fi
    fi
}
# Uninstall Kubernetes on Ubuntu: reset the node with kubeadm, delete the
# CNI manifests, remove the packages and wipe state directories.
# Globals read: $flannel, $calico (set by select_country).
uninstall_kubernetes_ubuntu() {
  echo "正在卸载 Kubernetes..."
  if command -v kubeadm &>/dev/null; then
    kubeadm reset -f # non-interactive teardown of kubeadm state
  else
    echo "kubeadm 未找到,无法执行重置操作。请手动重置 Kubernetes。"
  fi
  if command -v kubectl &>/dev/null; then
    # $flannel/$calico stay unquoted on purpose: in the China branch
    # $calico carries an extra flag that must word-split into its own arg.
    kubectl delete -f $flannel
    kubectl delete -f $calico
    apt remove -y kubeadm kubelet kubectl containerd
    # Destroys etcd data and kubelet state — irreversible.
    rm -rf /etc/kubernetes /var/lib/etcd /var/lib/kubelet
  else
    echo "kubectl 未找到,无法执行删除操作。请手动删除相关资源。"
  fi
}
# Uninstall Kubernetes on CentOS / Amazon Linux: delete CNI manifests,
# remove packages and wipe state directories.
# Globals read: $flannel, $calico (set by select_country).
uninstall_kubernetes_centos() {
  echo "正在卸载 Kubernetes..."
  if command -v kubectl &>/dev/null; then
    # Unquoted on purpose — see uninstall_kubernetes_ubuntu.
    kubectl delete -f $flannel
    kubectl delete -f $calico
    yum --debuglevel=1 remove -y kubeadm kubelet kubectl containerd bash-completion
    yum autoremove -y
    # Destroys etcd data and kubelet state — irreversible.
    rm -rf /etc/kubernetes /var/lib/etcd /var/lib/kubelet
  else
    echo "kubectl 未找到,无法执行删除操作。请手动删除相关资源。"
  fi
}
# Disable the host firewall (ufw on Ubuntu, firewalld on CentOS/Amazon
# Linux), flush iptables and put SELinux into permissive/disabled mode —
# prerequisites for kubeadm pod networking on these distros.
disable_firewall() {
  echo "正在关闭并禁用防火墙..."
  if [[ $os = "ubuntu" ]]; then
    ufw disable
  elif [[ $os = "centos" || $os = "amazon_linux" ]]; then
    systemctl stop firewalld
    systemctl disable firewalld
    # Flush all rules, delete user chains and zero counters in the
    # filter and nat tables.
    iptables -F
    iptables -X
    iptables -Z
    iptables -F -t nat
    iptables -X -t nat
    iptables -Z -t nat
    iptables -P INPUT ACCEPT
    if [ -s /etc/selinux/config ]; then
      setenforce 0 # permissive immediately...
      # ...and disabled permanently after the next reboot.
      sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config
    fi
  fi
}
# kubelet refuses to start with swap enabled: turn it off now and drop
# swap entries from fstab so it stays off after a reboot.
disable_swap() {
  echo "正在关闭并禁用 Swap..."
  swapoff -a
  # NOTE(review): deletes every fstab line containing "swap", which would
  # also drop unrelated lines that merely mention the word.
  sed -i '/swap/d' /etc/fstab
}
# Write the kernel tuning required by Kubernetes into a dedicated sysctl
# drop-in file and apply it immediately.
optimize_kernel() {
  echo "正在优化内核参数..."
  sysctl_file="/etc/sysctl.d/kubernetes.conf"
  # BUG FIX: every line used to be appended with '>>', so re-running the
  # script accumulated duplicate entries; recreate the file instead.
  # (The net.bridge.bridge-nf-call-* lines were commented out upstream
  # and are intentionally omitted, matching the original behavior.)
  cat >"$sysctl_file" <<EOF
net.ipv4.ip_forward=1
vm.max_map_count=262144
EOF
  sysctl -p "$sysctl_file"
}
# Install and start a oneshot systemd unit that disables Transparent
# Huge Pages at boot (recommended for database/K8s workloads).
disable_transparent_hugepage() {
    echo "禁用透明大页..."
    thp_file="/etc/systemd/system/disable-thp.service"
    # Write the whole unit in one go; quoted delimiter keeps it literal.
    cat >"$thp_file" <<'EOF'
[Unit]
Description=Disable Transparent Huge Pages (THP)
DefaultDependencies=no
After=local-fs.target
Before=apparmor.service

[Service]
Type=oneshot
ExecStart=/bin/sh -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled && echo never > /sys/kernel/mm/transparent_hugepage/defrag'

[Install]
WantedBy=multi-user.target
EOF
    chmod 664 $thp_file
    systemctl daemon-reload
    systemctl enable disable-thp
    systemctl start disable-thp
}
# Install kubeadm/kubelet/kubectl pinned to $kubernetes_version from the
# repo appropriate for this distro, plus bash completion on RPM systems.
# Globals read: $os, $apt_repository, $yum_repository, $kubernetes_version.
install_kubernetes() {
  echo "正在安装 kubeadm、kubelet 和 kubectl版本$kubernetes_version..."
  if [[ $os = "ubuntu" ]]; then
    apt update
    apt install -y apt-transport-https ca-certificates curl bridge-utils
    modprobe br_netfilter # kernel module required for bridged pod traffic
    curl -fsSL $apt_repository/doc/apt-key.gpg | apt-key add -
    echo "deb $apt_repository kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list
    apt update
    # Debian packaging appends a '-00' revision to the upstream version.
    apt install -y kubeadm=$kubernetes_version-00 kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00
  elif [[ $os = "centos" || $os = "amazon_linux" ]]; then
    # Write the yum repo definition; ${yum_repository} expands inside the
    # unquoted heredoc.  GPG checking is disabled for mirror compatibility.
    cat <<EOF >/etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=${yum_repository}/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=${yum_repository}/yum/doc/yum-key.gpg
${yum_repository}/yum/doc/rpm-package-key.gpg
EOF
    yum --debuglevel=1 install -y kubeadm-$kubernetes_version kubelet-$kubernetes_version kubectl-$kubernetes_version
    systemctl enable kubelet
    echo "添加bash-completion 自动补全"
    yum install bash-completion -y
    # Enable kubectl completion for the current shell and future logins.
    source /usr/share/bash-completion/bash_completion
    source <(kubectl completion bash)
    echo "source <(kubectl completion bash)" >>~/.bashrc
  fi
}
# Install containerd (Aliyun docker-ce repo on RPM systems) and configure
# it for kubeadm: systemd cgroup driver plus the image mirror chosen by
# select_country for the sandbox/pause images.
install_containerd() {
  echo "正在安装 Containerd..."
  if [[ $os = "centos" || $os = "amazon_linux" ]]; then
    yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    yum --debuglevel=1 install -y containerd
  elif [[ $os = "ubuntu" ]]; then
    apt install -y containerd
  fi
  mkdir -p /etc/containerd
  # Start from the stock default configuration...
  containerd config default >/etc/containerd/config.toml
  # ...switch to the systemd cgroup driver (must match the kubelet)...
  sed -i 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml
  # ...and point image pulls at the selected registry mirror.
  sed -i "s#registry.k8s.io#${docker_image_repository}#" /etc/containerd/config.toml
  systemctl restart containerd
  systemctl enable containerd
}
# Reset any previous kubeadm state, bootstrap the control plane with
# 'kubeadm init', then install the admin kubeconfig for the current user.
# Globals read: $kubernetes_version, $docker_image_repository.
initialize_kubernetes_cluster() {
  if command -v kubeadm &>/dev/null; then
    kubeadm reset -f # wipe any half-initialized state first
  else
    echo "kubeadm 未找到,无法执行重置操作。请手动重置 Kubernetes。"
    exit 1
  fi
  echo "正在执行 kubeadm init..."
  kubeadm init --kubernetes-version=v${kubernetes_version} \
    --image-repository=${docker_image_repository} \
    --service-cidr=10.96.0.0/16 \
    --pod-network-cidr=10.244.0.0/16 \
    -v=5
  # --kubernetes-version   Kubernetes version to install
  # --image-repository     control-plane image registry (default registry.k8s.io)
  # --service-cidr         IP range for Kubernetes Services
  # --pod-network-cidr     IP range for Pods (10.244.0.0/16 matches flannel)
  # --control-plane-endpoint=test-k8s-lb.opsbase.cn:6443  load-balancer
  #   endpoint for an HA control plane (not used here)
  echo "已成功执行 kubeadm init。"
  # List the images containerd pulled during init.
  ctr image ls
  echo "正在复制 kubeconfig 文件..."
  mkdir -p $HOME/.kube
  # \cp bypasses any 'cp -i' alias so the copy never prompts.
  \cp /etc/kubernetes/admin.conf $HOME/.kube/config
  chown $(id -u):$(id -g) $HOME/.kube/config
  echo "kubeconfig 文件已复制到 $HOME/.kube/config。"
}
# Apply the Flannel CNI manifest whose URL was chosen by select_country.
install_network_plugin_flannel() {
    echo "正在安装 Flannel 网络组件..."
    # Print the manifest URL for the log, then apply it.
    echo "$flannel"
    kubectl apply -f "$flannel"
}
# Install the Calico CNI.  $calico is intentionally left unquoted: in the
# China branch select_country appends an --image-repository flag to the
# value, which must word-split into a separate kubectl argument.
install_network_plugin_calico() {
  echo "正在安装 Calico 网络组件..."
  kubectl create -f $calico
}
# Orchestrate the installation: pick mirrors, detect the distro, prepare
# the host, install runtime + packages, and on a master node initialize
# the cluster and CNI.  Worker nodes stop after host preparation.
main() {
  select_country
  get_os_info
  check_root_user
  check_kubernetes_installed
  disable_firewall
  disable_swap
  disable_transparent_hugepage
  install_kubernetes
  install_containerd
  optimize_kernel
  if [[ "$node_type" = "master" ]]; then
    initialize_kubernetes_cluster
    install_network_plugin_flannel
    # To use Calico instead, comment out the flannel call above and
    # uncomment the next line.
    # install_network_plugin_calico
  else
    echo "slave节点,跳过集群初始化操作。"
  fi
}
# Entry point.
main

View File

@ -0,0 +1,9 @@
FROM cptactionhank/atlassian-confluence:7.4.0
USER root
# 将代理破解包加入容器
COPY "atlassian-agent-v1.2.3/atlassian-agent.jar" /opt/atlassian/confluence/
# 设置启动加载代理包
RUN echo 'export CATALINA_OPTS="-javaagent:/opt/atlassian/confluence/atlassian-agent.jar ${CATALINA_OPTS}"' >> /opt/atlassian/confluence/bin/setenv.sh

20
2.docker/confluence.sh Normal file
View File

@ -0,0 +1,20 @@
#!/bin/bash
###
# @Author: Logan.Li
# @Gitee: https://gitee.com/attacker
# @email: admin@attacker.club
# @Date: 2022-12-10 22:27:24
# @LastEditTime: 2023-09-28 13:34:20
# @Description: Run Confluence in Docker and activate it with the agent.
###
# BUG FIX: the header comment block had been inserted into the middle of
# the 'docker run' command (and the first line lacked a continuation
# backslash), which broke the script; the command is now in one piece.
docker run -d --name confluence \
  --restart always \
  -p 8090:8090 \
  -e TZ="Asia/Shanghai" \
  -v /home/confluence:/var/atlassian/confluence \
  confluence:v1
# Copy the MySQL JDBC driver into the container.
docker cp mysql-connector-java-5.1.48-bin.jar confluence:/opt/atlassian/confluence/lib
docker exec -it confluence java -jar /opt/atlassian/confluence/atlassian-agent.jar -p conf -m pp@pangshare.com -n pangshare -o https://www.pangshare.com -s B37H-XJIY-BCSR-FZQQ

View File

@ -0,0 +1,17 @@
FROM python:3.8-alpine
WORKDIR /home
COPY . /home
RUN pip install -i http://mirrors.aliyun.com/pypi/simple --trusted-host mirrors.aliyun.com -r requirements.txt
RUN rm -rf /home/env
# RUN python manage.py makemigrations && python manage.py migrate
# CMD [ "python", "./manage.py", "runserver", "0.0.0.0:8000"]
RUN chmod +x run.sh
EXPOSE 8000
CMD ["/bin/sh","run.sh"]
# 容器启动时默认执行的命令
# docker build -t lghost/bind9:latest . # build images
# docker push lghost/bind9:latest # 推送到dockerhub

102
2.docker/docker-install Normal file
View File

@ -0,0 +1,102 @@
#!/bin/bash
#
# 定义日志文件路径
LOG_FILE="/var/log/docker_install.log"
# Log a message: colourised to stdout by level, plain text appended to
# $LOG_FILE.  NOTE: this shadows the system 'logger' command inside the
# script.
#   $1 - level: error/red, warning/yellow, success/green, info/blue
#   $2 - message text
function logger() {
    local level="$1"
    local msg="$2"
    local color=""
    case "$level" in
    "error" | "red") color="\e[1;31m" ;;
    "warning" | "yellow") color="\e[1;33m" ;;
    "success" | "green") color="\e[1;32m" ;;
    "info" | "blue") color="\e[1;34m" ;;
    esac
    echo -e "${color}${msg}\e[0m"
    echo "$msg" >> "$LOG_FILE"
}
# Must run as root: package installs, systemctl and /var/log writes.
if [ $(id -u) -ne 0 ]; then
  logger "error" "########## 错误:此脚本必须以 root 身份运行! ##########"
  exit 1
fi
# Environment check: put SELinux in permissive mode now and disable it
# permanently, since enforcing mode can block the Docker daemon.
if which getenforce && [ $(getenforce) == "Enforcing" ]; then
  logger "info" "信息:关闭 SELINUX"
  setenforce 0
  sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
fi
# Install Docker via one of three methods, then enable + start the service.
#   $1 - "local": offline tarball docker-24.0.6.tar.gz in the CWD
#        "curl":  official convenience script from get.docker.com
#        "yum":   official docker-ce yum repository
# Returns 1 when the chosen method is unavailable or invalid.
function install_docker() {
  local install_method="$1"
  if [ "$install_method" == "local" ]; then
    if [ -f "docker-24.0.6.tar.gz" ]; then
      logger "info" "信息:发现本地压缩包,进行解压安装"
      tar zxvf docker-24.0.6.tar.gz
      # Install the unit file first, then the extracted binaries.
      mv docker/docker.service /etc/systemd/system/docker.service
      cp docker/* /usr/bin
    else
      logger "warning" "警告:未找到本地压缩包,无法进行本地安装"
      return 1
    fi
  elif [ "$install_method" == "curl" ]; then
    logger "info" "信息:尝试通过 curl 安装 Docker"
    # Official install script; detects the distro and adds its repo.
    curl -fsSL https://get.docker.com | sh
  elif [ "$install_method" == "yum" ]; then
    logger "warning" "警告:尝试通过 Yum 安装 Docker"
    # Remove legacy docker packages before adding the official repo.
    sudo yum remove docker \
      docker-client \
      docker-client-latest \
      docker-common \
      docker-latest \
      docker-latest-logrotate \
      docker-logrotate \
      docker-engine -y
    sudo yum-config-manager \
      --add-repo \
      https://download.docker.com/linux/centos/docker-ce.repo
    sudo yum install docker-ce -y
  else
    logger "error" "错误:无效的安装方法"
    return 1
  fi
  # Common tail for every successful path: register and start the daemon.
  systemctl daemon-reload
  systemctl enable docker
  systemctl start docker
  logger "success" "成功:安装 Docker 并启动服务($install_method 方式)"
}
# Try the installation methods in order of preference and stop at the
# first one that succeeds.
for method in local curl; do
  if install_docker "$method"; then
    exit 0
  fi
done
# Last resort: yum.
install_docker "yum"
logger "info" "======================= 安装完成 ======================="

103
2.docker/docker-tools.sh Normal file
View File

@ -0,0 +1,103 @@
#!/bin/bash
# Docker 镜像与容器操作脚本
# Save a Docker image to a tar archive.
#   $1 - image name, $2 - destination path
# Returns 1 on missing arguments or docker-save failure.
export_image() {
    local image="$1"
    local dest="$2"
    if [ -z "$image" ] || [ -z "$dest" ]; then
        echo "用法: 导出镜像 <镜像名称> <导出路径>"
        return 1
    fi
    echo "正在导出镜像 '$image' 至 '$dest'..."
    if docker save -o "$dest" "$image"; then
        echo "镜像导出成功。"
    else
        echo "镜像导出失败。"
        return 1
    fi
}
# Commit a running container to a new image.
#   $1 - container name or ID, $2 - new image name
# Returns 1 on missing arguments or docker-commit failure.
export_container_as_image() {
    local container="$1"
    local image="$2"
    if [ -z "$container" ] || [ -z "$image" ]; then
        echo "用法: 导出容器为镜像 <容器名称或ID> <新镜像名称>"
        return 1
    fi
    echo "正在将运行中的容器 '$container' 导出为镜像 '$image'..."
    if docker commit "$container" "$image"; then
        echo "容器已成功导出为镜像。"
    else
        echo "容器导出为镜像失败。"
        return 1
    fi
}
# Load a Docker image from a tar archive.
#   $1 - path of the archive to import
# Returns 1 on a missing argument, missing file, or docker-load failure.
import_image() {
    local archive="$1"
    if [ -z "$archive" ]; then
        echo "用法: 导入镜像 <导入文件路径>"
        return 1
    fi
    if [ ! -f "$archive" ]; then
        echo "文件 '$archive' 不存在。"
        return 1
    fi
    echo "正在从 '$archive' 导入 Docker 镜像..."
    if docker load -i "$archive"; then
        echo "镜像导入成功。"
    else
        echo "镜像导入失败。"
        return 1
    fi
}
# Print the usage table for this script (content unchanged; emitted via
# printf + a literal heredoc instead of per-line echo).
print_help() {
    printf '\n%s\n\n' "操作指南:"
    cat <<'EOF'
+---------------------------+---------------------------------------------------------+
| 功能 | 命令格式 |
+---------------------------+---------------------------------------------------------+
| export_image | ./docker-tools.sh 导出镜像 <镜像名称> <导出路径> |
| export_container | ./docker-tools.sh 导出容器为镜像 <容器名或ID> <新镜像名>|
| import_image | ./docker-tools.sh 导入镜像 <导入文件路径> |
+---------------------------+---------------------------------------------------------+
| 注: | 使用前请确保已赋予权限,如 'chmod +x docker-tools.sh' |
+---------------------------+---------------------------------------------------------+
EOF
}
# Main dispatch: run the sub-command named by $1, or print help.
case "$1" in
export_image)
    shift
    export_image "$@"
    ;;
import_image)
    shift
    import_image "$@"
    ;;
export_container_as_image)
    shift
    # BUG FIX: this arm used to call the undefined function
    # 'export_container' instead of 'export_container_as_image'.
    export_container_as_image "$@"
    ;;
*)
    print_help
    ;;
esac

57
2.docker/etl.sh Normal file
View File

@ -0,0 +1,57 @@
#!/bin/bash
###
# @Author: Logan.Li
# @Gitee: https://gitee.com/attacker
# @email: admin@attacker.club
# @Date: 2023-10-19 01:52:35
# @LastEditTime: 2023-10-19 10:11:16
# @Description:
###
DOLPHINSCHEDULER_VERSION=3.2.0
# Initialize the database, make sure database <DATABASE> already exists
docker run -d --name dolphinscheduler-tools \
-e DATABASE="postgresql" \
-e SPRING_DATASOURCE_URL="jdbc:postgresql://localhost:5432/<DATABASE>" \
-e SPRING_DATASOURCE_USERNAME=root \
-e SPRING_DATASOURCE_PASSWORD="Wu20@250" \
-e SPRING_JACKSON_TIME_ZONE="UTC" \
--net host \
apache/dolphinscheduler-tools:"${DOLPHINSCHEDULER_VERSION}" tools/bin/upgrade-schema.sh
# Starting DolphinScheduler service
docker run -d --name dolphinscheduler-master \
-e DATABASE="postgresql" \
-e SPRING_DATASOURCE_URL="jdbc:postgresql://localhost:5432/dolphinscheduler" \
-e SPRING_DATASOURCE_USERNAME=root \
-e SPRING_DATASOURCE_PASSWORD="Wu20@250" \
-e SPRING_JACKSON_TIME_ZONE="UTC" \
-e REGISTRY_ZOOKEEPER_CONNECT_STRING="localhost:2181" \
--net host \
-d apache/dolphinscheduler-master:"${DOLPHINSCHEDULER_VERSION}"
docker run -d --name dolphinscheduler-worker \
-e DATABASE="postgresql" \
-e SPRING_DATASOURCE_URL="jdbc:postgresql://localhost:5432/dolphinscheduler" \
-e SPRING_DATASOURCE_USERNAME=root \
-e SPRING_DATASOURCE_PASSWORD="Wu20@250" \
-e SPRING_JACKSON_TIME_ZONE="UTC" \
-e REGISTRY_ZOOKEEPER_CONNECT_STRING="localhost:2181" \
--net host \
-d apache/dolphinscheduler-worker:"${DOLPHINSCHEDULER_VERSION}"
docker run -d --name dolphinscheduler-api \
-e DATABASE="postgresql" \
-e SPRING_DATASOURCE_URL="jdbc:postgresql://localhost:5432/dolphinscheduler" \
-e SPRING_DATASOURCE_USERNAME=root \
-e SPRING_DATASOURCE_PASSWORD="Wu20@250" \
-e SPRING_JACKSON_TIME_ZONE="UTC" \
-e REGISTRY_ZOOKEEPER_CONNECT_STRING="localhost:2181" \
--net host \
-d apache/dolphinscheduler-api:"${DOLPHINSCHEDULER_VERSION}"
docker run -d --name dolphinscheduler-alert-server \
-e DATABASE="postgresql" \
-e SPRING_DATASOURCE_URL="jdbc:postgresql://localhost:5432/dolphinscheduler" \
-e SPRING_DATASOURCE_USERNAME=root \
-e SPRING_DATASOURCE_PASSWORD="Wu20@250" \
-e SPRING_JACKSON_TIME_ZONE="UTC" \
-e REGISTRY_ZOOKEEPER_CONNECT_STRING="localhost:2181" \
--net host \
-d apache/dolphinscheduler-alert-server:"${DOLPHINSCHEDULER_VERSION}"

6
2.docker/frp.sh Normal file
View File

@ -0,0 +1,6 @@
docker run --restart=always --network host \
-d -v /etc/frps.ini:/etc/frp/frps.ini --name frps lghost/frps:0.48
docker run --restart=always --network host \
-d -v /etc/frpc.ini:/etc/frp/frpc.ini --name frpc lghost/frpc:0.48

15
2.docker/frpc.dockerfile Normal file
View File

@ -0,0 +1,15 @@
FROM alpine:latest
ENV FRP_VERSION 0.48.0
RUN mkdir -p /etc/frp \
&& wget --no-check-certificate -c https://github.com/fatedier/frp/releases/download/v${FRP_VERSION}/frp_${FRP_VERSION}_linux_amd64.tar.gz \
&& tar zxvf frp_${FRP_VERSION}_linux_amd64.tar.gz \
&& cp frp_${FRP_VERSION}_linux_amd64/frpc /usr/bin/ \
&& cp frp_${FRP_VERSION}_linux_amd64/frpc.ini /etc/frp \
&& rm -rf frp_*
ENTRYPOINT /usr/bin/frpc -c /etc/frp/frpc.ini
# docker build -t lghost/frpc .
# docker run --restart=always --network host -d -v /etc/frpc.ini:/etc/frp/frpc.ini --name frpc lghost/frpc:0.48

38
2.docker/frpc.ini Normal file
View File

@ -0,0 +1,38 @@
# frpc.ini FRPC configuration
[common]
server_addr = 21.136.xxx.xxx
server_port = 7000
token = ************
[open]
type = http
local_ip = 192.168.0.254
local_port = 80
remote_port = 80
custom_domains = open.opsbase.cn
[test]
type = http
local_ip = 192.168.0.254
local_port = 80
remote_port = 80
custom_domains = test.opsbase.cn
[tcp4430]
type = tcp
local_ip = 10.10.10.209
local_port = 4430
remote_port = 4430
[ssh]
type = tcp
local_ip = 192.168.0.254
local_port = 22
remote_port = 60022
[RDP]
type = tcp
local_ip = 192.168.0.234
local_port = 3389
remote_port = 63389

15
2.docker/frps.dockerfile Normal file
View File

@ -0,0 +1,15 @@
FROM alpine:latest
ENV FRP_VERSION 0.48.0
RUN mkdir -p /etc/frp \
&& wget --no-check-certificate -c https://github.com/fatedier/frp/releases/download/v${FRP_VERSION}/frp_${FRP_VERSION}_linux_amd64.tar.gz \
&& tar zxvf frp_${FRP_VERSION}_linux_amd64.tar.gz \
&& cp frp_${FRP_VERSION}_linux_amd64/frps /usr/bin/ \
&& cp frp_${FRP_VERSION}_linux_amd64/frps.ini /etc/frp \
&& rm -rf frp_*
ENTRYPOINT /usr/bin/frps -c /etc/frp/frps.ini
# docker build -t lghost/frps -f frps.dockerfile .
# docker run --restart=always --network host -d -v /etc/frps.ini:/etc/frp/frps.ini --name frps lghost/frps:0.48

12
2.docker/frps.ini Normal file
View File

@ -0,0 +1,12 @@
# frps.ini FRPS configuration
[common]
bind_addr=0.0.0.0
bind_port = 7000
token=************
dashboard_port = 7500
dashboard_user = root
dashboard_pwd = password1
vhost_http_port = 80
vhost_https_port = 443
tcp_mux = true
privilege_mode = true

19
2.docker/gitlab.sh Normal file
View File

@ -0,0 +1,19 @@
#!/bin/bash
# Recreate the GitLab CE container with persistent config/log/data volumes.
docker stop gitlab
docker rm gitlab
# BUG FIX: '--p' is not a valid docker flag; port publishing uses '-p'.
docker run -d \
  -p 8443:443 -p 8800:80 -p 2222:22 \
  --name gitlab \
  --restart always \
  --privileged=true \
  --volume /data/gitlab/config:/etc/gitlab \
  --volume /data/gitlab/logs:/var/log/gitlab \
  --volume /data/gitlab/data:/var/opt/gitlab \
  gitlab/gitlab-ce
# Pin a version with e.g.: gitlab/gitlab-ce:12.3.5-ce.0
# --privileged=true grants the container host-root privileges
# /etc/gitlab/gitlab.rb  # update external_url here
# gitlab-ctl reconfigure # reload the configuration
# docker exec -it gitlab cat /etc/gitlab/initial_root_password  # initial password

15
2.docker/gogs.sh Normal file
View File

@ -0,0 +1,15 @@
#!/bin/bash
###
# @Author: Logan.Li
# @Gitee: https://gitee.com/attacker
# @email: admin@attacker.club
# @Date: 2023-09-29 23:32:45
# @LastEditTime: 2023-09-29 23:38:15
# @Description:
###
docker run -d \
--name=gogs \
--restart=always \
-p 3022:22 -p 3000:3000 \
-v /data/docker/gogs:/data gogs/gogs

View File

@ -0,0 +1,18 @@
FROM tomcat:8.5.24-jre8
MAINTAINER Logan "admin@attacker.club"
# 大数据数据质量 后端api服务
ENV TZ=Asia/Shanghai
ENV TOMCAT_HOME=/usr/local/tomcat
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
##RUN rm $TOMCAT_HOME/bin/catalina.sh
COPY doc/catalina.sh $TOMCAT_HOME/bin/catalina.sh
##RUN rm $TOMCAT_HOME/conf/server.xml
COPY doc/server.xml $TOMCAT_HOME/conf/server.xml
RUN rm -rf $TOMCAT_HOME/webapps/*
COPY holmes-web/target/holmes-web-1.0-SNAPSHOT.war $TOMCAT_HOME/webapps/holmes.war
RUN chmod +x /usr/local/tomcat/bin/catalina.sh
EXPOSE 8080

View File

@ -0,0 +1,17 @@
FROM nginx:1.11.10-alpine
MAINTAINER Logan "admin@attacker.club"
# 前端静态
ENV TZ=Asia/Shanghai
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN mkdir -p /home/jollychic/www
COPY dist /home/jollychic/www/dist
# npm编译后的静态文件 放nginx镜像里面
COPY config/nginx.conf /etc/nginx/conf.d/default.conf
COPY init.sh /home/jollychic/init.sh
WORKDIR /home/jollychic
EXPOSE 80
CMD chmod +x init.sh && ./init.sh

16
2.docker/jenkins.sh Normal file
View File

@ -0,0 +1,16 @@
#!/bin/bash
###
# @Author: admin@attacker.club
# @Date: 2022-12-10 22:27:24
# @LastEditTime: 2023-02-15 15:42:49
# @Description:
###
docker run --name jenkins \
--restart=always -d \
-p 8080:8080 \
-v /home/jenkins/data:/var/jenkins_home \
jenkins/jenkins:lts
docker exec -it jenkins cat /var/jenkins_home/secrets/initialAdminPassword
# 查看解锁密钥

42
2.docker/jumpserver.sh Normal file
View File

@ -0,0 +1,42 @@
#!/bin/bash
###
# @Author: admin@attacker.club
# @Date: 2022-12-10 22:27:24
# @LastEditTime: 2023-02-28 20:48:41
# @Description: Run the all-in-one JumpServer container; generate and
#   persist SECRET_KEY / BOOTSTRAP_TOKEN in ~/.bashrc on first run.
###
# Generate a 50-char SECRET_KEY once; later runs reuse the env value.
# (Idiom fix: $(...) instead of backticks, no useless 'cat | tr'.)
if [ ! "$SECRET_KEY" ]; then
  SECRET_KEY=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 50)
  echo "SECRET_KEY=$SECRET_KEY" >>~/.bashrc
fi
echo "$SECRET_KEY"
# Same for the 16-char BOOTSTRAP_TOKEN used by components to register.
if [ ! "$BOOTSTRAP_TOKEN" ]; then
  BOOTSTRAP_TOKEN=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
  echo "BOOTSTRAP_TOKEN=$BOOTSTRAP_TOKEN" >>~/.bashrc
fi
echo "$BOOTSTRAP_TOKEN"
docker run --name jms_all -d \
  --restart=always \
  -v /opt/jumpserver:/opt/jumpserver/data/media \
  -p 80:80 \
  -p 2222:2222 \
  -e SECRET_KEY=$SECRET_KEY \
  -e BOOTSTRAP_TOKEN=$BOOTSTRAP_TOKEN \
  -e DB_HOST=192.168.xx.xx \
  -e DB_PORT=3306 \
  -e DB_USER=jumpserver \
  -e DB_PASSWORD='xxxxx' \
  -e DB_NAME=jumpserver \
  -e REDIS_HOST=192.168.xx.xx \
  -e REDIS_PORT=6379 \
  -e REDIS_PASSWORD=123456 \
  jumpserver/jms_all

14
2.docker/kuboard.sh Normal file
View File

@ -0,0 +1,14 @@
#!/bin/bash
# 获取当前 IP 地址
current_ip=$(hostname -I | awk '{print $1}')
sudo docker run -d \
--restart=unless-stopped \
--name=kuboard \
-p 82:80/tcp \
-p 10081:10081/tcp \
-e KUBOARD_ENDPOINT="http://$current_ip:82" \
-e KUBOARD_AGENT_SERVER_TCP_PORT="10081" \
-v /data/kuboard-data:/data \
eipwork/kuboard:v3

15
2.docker/mysql5.7.sh Normal file
View File

@ -0,0 +1,15 @@
#!/bin/bash
###
# @Author: admin@attacker.club
# @Date: 2022-09-14 21:38:54
# @LastEditTime: 2023-03-31 01:37:18
# @Description:
###
docker run -p 3306:3306 --name mysql \
--restart always \
-v /usr/local/docker/mysql/conf:/etc/mysql \
-v /usr/local/docker/mysql/logs:/var/log/mysql \
-v /usr/local/docker/mysql/data:/var/lib/mysql \
-e MYSQL_ROOT_PASSWORD=123456 \
-d mysql:5.7

21
2.docker/mysql8.sh Normal file
View File

@ -0,0 +1,21 @@
#!/bin/bash
###
# @Author: admin@attacker.club
# @Date: 2022-09-14 21:38:54
# @LastEditTime: 2024-06-30 23:18:01
# @Description: Install the MySQL client and run MySQL 8 in Docker with a
#   random root password saved to mysql.txt.
###
dataDir="/opt/docker-data/mysql"
yum remove mariadb* -y # remove conflicting MariaDB packages
rpm -ivh https://repo.mysql.com/mysql80-community-release-el7.rpm
yum install mysql-community-client -y # install the mysql CLI client
# Random 12-character root password (idiom fix: no useless 'cat | tr').
password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 12)
docker run \
  --restart always \
  -d -p 3306:3306 --name mysql \
  -v $dataDir:/var/lib/mysql \
  -e MYSQL_ROOT_PASSWORD=$password \
  mysql:8
# BUG FIX: 'passwod' typo in the saved credentials file.
echo "password: $password" >mysql.txt

19
2.docker/openldap.sh Normal file
View File

@ -0,0 +1,19 @@
#!/bin/bash
## Run an OpenLDAP server in Docker with a random admin password that is
## saved to openldap.txt.
# Idiom fix: $(...) instead of backticks, no useless 'cat | tr'.
password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 12)
docker run \
  -d -p 389:389 -p 636:636 \
  --name ldap-service \
  --restart=always \
  --hostname openldap \
  -v /opt/docker-data/slapd/database:/var/lib/ldap \
  --volume /opt/docker-data/slapd/config:/etc/ldap/slapd.d \
  --env LDAP_ORGANISATION="Opsbase" \
  --env LDAP_DOMAIN="opsbase.cn" \
  --env LDAP_BASE_DN="dc=opsbase,dc=cn" \
  --env LDAP_ADMIN_PASSWORD=$password \
  osixia/openldap:latest
# BUG FIX: 'passwod' typo in the saved credentials file.
echo "password: $password" > openldap.txt
# Default admin DN (from LDAP_BASE_DN above): cn=admin,dc=opsbase,dc=cn

25
2.docker/openwrt.sh Normal file
View File

@ -0,0 +1,25 @@
###
# @Author: Logan.Li
# @Gitee: https://gitee.com/attacker
# @email: admin@attacker.club
# @Date: 2023-07-02 00:37:17
# @LastEditTime: 2023-07-02 00:37:28
# @Description: Run OpenWrt in Docker on a macvlan network.
# https://supes.top/docker%E7%89%88openwrt%E6%97%81%E8%B7%AF%E7%94%B1%E5%AE%89%E8%A3%85%E8%AE%BE%E7%BD%AE%E6%95%99%E7%A8%8B/
###
# Enable promiscuous mode on the NIC so macvlan can receive foreign MACs.
ip link set ens33 promisc on
# Create the macvlan network bridged to ens33.
docker network create -d macvlan \
  --subnet=192.168.0.0/24 \
  --gateway=192.168.0.11 -o parent=ens33 openwrt-net
docker network ls && docker network inspect openwrt-net
# Download xxx-rootfs.tar.gz, https://supes.top/?target=x86%2F64&id=generic
# Load the image, then create and start the container.
# BUG FIX: the old background-run line was malformed ('-rm' instead of
# '--rm', fused tokens 'openwrt--network', and --privileged placed after
# the image name).  Corrected daemonized variant, kept as the alternative:
# docker run -d --rm --privileged --name openwrt --network openwrt-net openwrt:latest /sbin/init
docker run -it --rm --name openwrt --network openwrt-net --ip 192.168.0.2 openwrt:latest bash

0
2.docker/php.sh Normal file
View File

16
2.docker/phpldapadmin.sh Normal file
View File

@ -0,0 +1,16 @@
## https://35.xx.xx.xx:6443/
###
# @Author: admin@attacker.club
# @Date: 2023-02-22 19:14:48
# @LastEditTime: 2023-02-22 19:16:41
# @Description:
## 在浏览器输入 https://内网IP:6443 ,按下图所示步骤登录 phpLDAPadmin
## Login DN: cn=admin,dc=example,dc=org 默认管理员用户
## Password: admin 管理员密码
###
docker run -p 6443:443 \
--name ldapadmin \
--link ldap-service:ldap \
--env PHPLDAPADMIN_LDAP_HOSTS=ldap \
--detach osixia/phpldapadmin:0.9.0

20
2.docker/poste-mail.sh Normal file
View File

@ -0,0 +1,20 @@
#!/bin/bash
docker stop mail
docker rm mail
docker run --name "mail" \
-p 25:25 -p 8888:80 \
-e "HTTPS=OFF" \
-e "DISABLE_CLAMAV=TRUE" \
-p 110:110 -p 143:143 -p 465:465 \
-p 587:587 -p 993:993 -p 995:995 \
-v /etc/localtime:/etc/localtime:ro -v \
/home/data:/data \
-h "mail.xxx.com" \
--restart=always -d -t analogic/poste.io
iptables -D INPUT -p tcp -m multiport --dport 25,110,143,465,993,587,995,1022,55557 -j ACCEPT
iptables -I INPUT -p tcp -m multiport --dport 25,110,143,465,993,587,995,1022,55557 -j ACCEPT
iptables-save

View File

@ -0,0 +1,41 @@
FROM python:3.9-alpine
MAINTAINER Logan <admin@attacker.club>
WORKDIR /opt/app
COPY requirements.txt /opt/app
## 国内加速源
RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
## 工具包
RUN apk --no-cache add curl vim busybox-extras
## ldap依赖
RUN apk --no-cache add gcc \
libldap \
libc-dev \
openldap-dev
# python3-dev
## Pillow图片库依赖
RUN apk --no-cache add libjpeg jpeg-dev musl-dev zlib-dev libffi-dev
# python3-dev
RUN pip --no-cache-dir install -i https://mirrors.aliyun.com/pypi/simple -r requirements.txt
COPY . /opt/app
## 清理数据
RUN apk del \
gcc \
libc-dev \
openldap-dev
# RUN apk del libjpeg jpeg-dev musl-dev zlib-dev libffi-dev
RUN rm -rf /tmp/* /opt/app/env /opt/app/db.sqlite3 /opt/app/logs/* /root/.cache/pip /var/cache/apk/*
EXPOSE 8000
CMD ["/bin/sh","run.sh"]
## 构建镜像
# docker build -t lghost/python:3.9 . -f Dockerfile-alpine
## 简洁启动
# docker run -d -p8000:8000 --name cmdb lghost/python:3.9
## 初始化数据
# docker exec -it cmdb python init.py

View File

@ -0,0 +1,19 @@
FROM centos:7
RUN rm /etc/yum.repos.d/* \
&& curl -s -o ./Centos-7.repo https://mirrors.aliyun.com/repo/Centos-7.repo \
&& curl -s -o ./epel.repo https://mirrors.aliyun.com/repo/epel-7.repo \
&& sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' Centos-7.repo \
&& yum install -y wget openssl-devel bzip2-devel expat-devel gdbm-devel readline-devel sqlite-devel python-devel \
libffi-devel tk-devel
RUN wget -c https://www.python.org/ftp/python/3.9.16/Python-3.9.16.tar.xz \
&& tar xf Python*.xz \
&& cd Python-3.9.16 \
&& ./configure prefix=/usr/local/python3 \
&& make -j 2 && make altinstall && ln -s /usr/local/python3/bin/python3.9 /usr/bin/python3
# docker build -t lghost/python:3.9 . -f Dockerfile-alpine

View File

@ -0,0 +1,13 @@
FROM python:3.9.13-slim
ENV PYTHONUNBUFFERED 1
# RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list # 国内源
WORKDIR /opt/app
RUN apt-get update \
&& apt-get install -y net-tools apt-utils \
libtiff5-dev libjpeg8-dev zlib1g-dev
COPY requirements.txt /opt/app

View File

@ -0,0 +1,19 @@
FROM ubuntu:22.04
MAINTAINER Logan <admin@attacker.club>
WORKDIR /opt
## 依赖包
RUN sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
&& apt update \
&& apt install nload iftop net-tools curl git -y \
&& apt install python3 python3-pip -y
## 清理数据
RUN rm -rf /var/lib/apt/lists/* \
&& apt clean \
&& apt autoclean
EXPOSE 8000
# docker build -t lghost/python3 . -f Dockerfile-ubuntu-python3

19
2.docker/redis.sh Normal file
View File

@ -0,0 +1,19 @@
#!/bin/bash
###
# @Author: admin@attacker.club
# @Date: 2022-09-14 21:38:54
# @LastEditTime: 2022-09-29 16:44:42
# @Description: run a password-protected redis container with AOF persistence
###

# Data directory and password may be overridden via the environment;
# the defaults are identical to the previously hard-coded values.
REDIS_DATA_DIR="${REDIS_DATA_DIR:-/home/docker-data/redis/data}"
REDIS_PASSWORD="${REDIS_PASSWORD:-opsbase.cn}"

docker run -d \
  --name redis \
  --restart always \
  -p 6379:6379 \
  -v "${REDIS_DATA_DIR}":/data \
  redis:latest --appendonly yes --requirepass "${REDIS_PASSWORD}"
# --appendonly yes : enable AOF persistence
# --requirepass    : set the auth password

View File

@ -0,0 +1,17 @@
FROM centos:7
# Back up the stock repo files, switch to Aliyun mirrors, install sshd and
# prepare it for container use: generate host keys, disable reverse-DNS
# lookups (UseDNS no), drop the pam_loginuid module (it fails in
# containers), clean yum caches, and set the root password to "opsbase".
RUN cd /etc/yum.repos.d && mkdir bak && mv *.repo bak/ \
    && curl -s -o ./Centos-7.repo https://mirrors.aliyun.com/repo/Centos-7.repo \
    && curl -s -o ./epel.repo https://mirrors.aliyun.com/repo/epel-7.repo \
    && sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' Centos-7.repo \
    && yum clean all && yum makecache \
    && yum install -y wget vim openssh-server net-tools initscripts \
    && /usr/bin/ssh-keygen -A && mkdir /var/run/sshd && echo 'UseDNS no' >> /etc/ssh/sshd_config && sed -i -e '/pam_loginuid.so/d' /etc/pam.d/sshd \
    && yum clean all && rm -rf /var/tmp/* && rm -rf /var/cache/yum/* \
    && echo 'root:opsbase' |chpasswd
# sshd listens on the standard port
EXPOSE 22
# ENTRYPOINT (not CMD) so sshd always runs in the foreground as PID 1
# CMD ["/usr/sbin/sshd","-D"]
ENTRYPOINT ["/usr/sbin/sshd","-D"]
# ## Build
# docker build -t lghost/centos7-ssh:latest . -f sshd-centos7-dockerfile

36
2.docker/sshd.dockerfile Normal file
View File

@ -0,0 +1,36 @@
FROM alpine
# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="admin@attacker.club.com"
# Switch APK mirrors to Aliyun
RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
# Install openssh + tzdata, generate host keys, and allow root password login
RUN apk update && \
    apk add --no-cache openssh tzdata && rm -rf /var/cache/apk/* && \
    ssh-keygen -t dsa -P "" -f /etc/ssh/ssh_host_dsa_key && \
    ssh-keygen -t rsa -P "" -f /etc/ssh/ssh_host_rsa_key && \
    ssh-keygen -t ecdsa -P "" -f /etc/ssh/ssh_host_ecdsa_key && \
    ssh-keygen -t ed25519 -P "" -f /etc/ssh/ssh_host_ed25519_key && \
    ssh-keygen -A && \
    sed -i "/PermitRootLogin/c PermitRootLogin yes" /etc/ssh/sshd_config && \
    sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/g' /etc/ssh/sshd_config
# Default root password, empty motd, colored shell prompt
RUN echo "root:123456" | chpasswd && \
    echo > /etc/motd && \
    echo '''PS1="\[\e[37;1m\][\[\e[32;1m\]\u\[\e[37;40m\]@\[\e[34;1m\]\h \[\e[0m\]\t \[\e[35;1m\]\W\[\e[37;1m\]]\[\e[m\]/\\$" ''' >>/etc/profile
# Hardening: remove network transfer tools.
# -f is placed BEFORE the operands: busybox applets generally do not
# accept options after file arguments (the original had a trailing -f).
RUN rm -f /usr/bin/wget /usr/bin/nc /usr/bin/scp /bin/ping /usr/bin/traceroute* /sbin/apk
# Expose the ssh port
EXPOSE 22
# Run sshd in the foreground as PID 1
CMD ["/usr/sbin/sshd", "-D"]
## Build
# docker build -t lghost/sshd . -f sshd.dockerfile
## Run
# docker run --restart=unless-stopped -p 2222:22 --name sshd lghost/sshd

9
2.docker/sshd.sh Normal file
View File

@ -0,0 +1,9 @@
#!/bin/bash
###
# @author: 以谁为师
# @site: opsbase.cn
# @Date: 2022-04-02 16:04:05
# @LastEditTime: 2022-10-14 12:23:01
# @Description:
###

15
2.docker/yearning.sh Normal file
View File

@ -0,0 +1,15 @@
## Installation guide
# https://guide.yearning.io
# (the URL must be a comment — as a bare line the shell would try to
# execute it as a command and fail)

## Build the image
docker build -t yearning:lts . -f Dockerfile

## Run
docker run -d --name yearning \
  -p11110:8000 \
  -e MYSQL_DB=Yearning \
  -e MYSQL_USER=admin -e MYSQL_ADDR=172.16.100.138:3306 -e MYSQL_PASSWORD=xxxxxx \
  yearning:lts

87
2.docker/zabbix.sh Normal file
View File

@ -0,0 +1,87 @@
#!/bin/bash
###
# @Author: admin@attacker.club
# @Date: 2022-09-29 14:35:55
# @LastEditTime: 2023-03-06 01:14:56
# @Description: deploy zabbix (mysql + server + web) via docker
###

# Remove any containers from a previous run, silently.
# (The original used ">2&1", which redirects stdout to a file literally
# named "2", backgrounds the command, and then tries to run "1".)
for name in zabbix-mysql zabbix-web zabbix-server; do
  docker stop "$name" >/dev/null 2>&1
  docker rm "$name" >/dev/null 2>&1
done

## DB service
dataDir="/opt/docker-data/mysql" # mysql data persistence directory
# NOTE(review): the original contained a broken line here:
#   rm ="/opt/docker-data/mysql"
# It looks like a leftover typo (perhaps an intended cleanup of the data
# dir); it has been removed rather than guessed at.
yum remove mariadb* -y # remove the stock mariadb packages
rpm -ivh https://repo.mysql.com/mysql80-community-release-el7.rpm
yum install mysql-community-client -y # install the mysql client

# On first run, generate random passwords and persist them to ~/.bashrc
# so re-runs reuse the same credentials.
if [ ! "$rootPassword" ]; then
  rootPassword=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 12)
  zbxPassword=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 12)
  echo "rootPassword=$rootPassword" >>~/.bashrc
  echo "zbxPassword=$zbxPassword" >>~/.bashrc
fi

echo "> 启动mysql"
docker run \
  --restart always \
  -d -p 3306:3306 \
  --name zabbix-mysql \
  --hostname zabbix-mysql \
  -e MYSQL_ROOT_PASSWORD="${rootPassword}" \
  -e MYSQL_USER="zabbix" \
  -e MYSQL_PASSWORD="${zbxPassword}" \
  -e MYSQL_DATABASE="zabbix" \
  -v "$dataDir":/var/lib/mysql \
  mysql:8 --character-set-server=utf8 --collation-server=utf8_bin
sleep 10 # give mysql time to initialize before zabbix-server connects

## Start zabbix server
echo "> 启动zabbix server"
docker run -d -p 10051:10051 \
  --restart always \
  --name zabbix-server \
  --hostname zabbix-server \
  --link zabbix-mysql:mysql \
  -e DB_SERVER_HOST="mysql" \
  -e MYSQL_USER="zabbix" \
  -e MYSQL_PASSWORD="${zbxPassword}" \
  -v /etc/localtime:/etc/localtime:ro \
  -v /data/docker/zabbix/alertscripts:/usr/lib/zabbix/alertscripts \
  -v /data/docker/zabbix/externalscripts:/usr/lib/zabbix/externalscripts \
  zabbix/zabbix-server-mysql:ubuntu-6.0-latest
# zabbix/zabbix-server-mysql:latest

## Start zabbix web
echo "> 启动zabbix web"
docker run -d -p 81:8080 \
  --restart always \
  --name zabbix-web \
  --hostname zabbix-web \
  --link zabbix-mysql:mysql \
  --link zabbix-server:zabbix-server \
  -e DB_SERVER_HOST="mysql" \
  -e MYSQL_USER="zabbix" \
  -e MYSQL_PASSWORD="${zbxPassword}" \
  -e ZBX_SERVER_HOST="zabbix-server" \
  -e PHP_TZ="Asia/Shanghai" \
  zabbix/zabbix-web-nginx-mysql:6.0-alpine-latest
# zabbix/zabbix-web-nginx-mysql:latest

sleep 3
# Record connection commands for later reference
echo "mysql -h127.0.0.1 -uroot -p$rootPassword" > mysql.txt
echo "mysql -h127.0.0.1 -uzabbix -p$zbxPassword" >> mysql.txt
echo "http://zabbix 账号: Admin / zabbix"
## SQL to add a remote account:
# CREATE USER 'admin'@'%' ;
# GRANT ALL ON *.* TO 'admin'@'%' IDENTIFIED WITH mysql_native_password BY 'adminPwd123';

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
name: busybox
namespace: kxyyq4
spec:
containers:
- image: busybox
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
name: busybox
restartPolicy: Always

View File

View File

@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello-world
spec:
selector:
matchLabels:
run: load-balancer-example
replicas: 2
template:
metadata:
labels:
run: load-balancer-example
spec:
containers:
- name: hello-world
image: registry.cn-hangzhou.aliyuncs.com/aliyun_google/google-sample-node-hello:1.0
ports:
- containerPort: 8080
protocol: TCP

View File

@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata: # metadata是该资源的元数据name是必须的元数据项
name: nginx-deployment
namespace: test # 指定命名空间
labels:
app: nginx
spec: # spec部分是该Deployment的规则说明
replicas: 2
selector:
matchLabels:
app: nginx
template: # template定义Pod的模板这是配置的重要部分
metadata: # 定义Pod的元数据至少要顶一个labellabel的key和value可以任意指定
labels:
app: nginx
spec: # spec描述的是Pod的规则此部分定义pod中每一个容器的属性name和image是必需的
containers:
- name: nginx
image: nginx:1.23-debian-11 # 提供本地nginx镜像
ports:
- containerPort: 80

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mysql-cmdb
namespace: test
data:
DB_HOST: "66.94.125.73"
DB_PORT: "63306"
DB_NAME: "cmdb"

View File

@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: myblog
namespace: test
data:
MYSQL_HOST: "66.94.125.73"
MYSQL_PORT: "63306"

View File

@ -0,0 +1,82 @@
# Deployment for the cmdb web app (single replica) in namespace "test".
# DB/Redis endpoints come from ConfigMaps; credentials come from Secrets.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cmdb
  namespace: test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cmdb
  template:
    metadata:
      labels:
        app: cmdb
    spec:
      containers:
        - name: cmdb
          # image: docker.io/lghost/cmdb:latest
          image: harbor.opsbase.cn/public/test.demo.python.cmdb:b6c565d
          imagePullPolicy: Always
          env:
            # MySQL endpoint from ConfigMap "mysql-cmdb"
            - name: DB_HOST
              valueFrom:
                configMapKeyRef:
                  name: mysql-cmdb
                  key: DB_HOST
            - name: DB_PORT
              valueFrom:
                configMapKeyRef:
                  name: mysql-cmdb
                  key: DB_PORT
            # MySQL credentials from Secret "mysql-cmdb"
            - name: DB_USER
              valueFrom:
                secretKeyRef:
                  name: mysql-cmdb
                  key: DB_USER
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-cmdb
                  key: DB_PASSWORD
            # Redis endpoint from ConfigMap "redis-cmdb"
            - name: REDIS_HOST
              valueFrom:
                configMapKeyRef:
                  name: redis-cmdb
                  key: REDIS_HOST
            - name: REDIS_PORT
              valueFrom:
                configMapKeyRef:
                  name: redis-cmdb
                  key: REDIS_PORT
            # Redis password from Secret "redis-cmdb"
            - name: REDIS_PWD
              valueFrom:
                secretKeyRef:
                  name: redis-cmdb
                  key: REDIS_PWD
          ports:
            - containerPort: 8000
          resources:
            requests:
              memory: 1500Mi
              cpu: 100m
            limits:
              # NOTE(review): 35000Mi (~34 Gi) is >20x the request and looks
              # like a typo for 3500Mi — confirm before relying on it.
              memory: 35000Mi
              cpu: 500m
          # Liveness: restart the container if the metrics endpoint stops answering
          livenessProbe:
            httpGet:
              path: /prometheus/metrics
              port: 8000
              scheme: HTTP
            initialDelaySeconds: 15 # seconds to wait before the first probe
            periodSeconds: 120 # probe interval
            timeoutSeconds: 5 # probe timeout
          # Readiness: gate traffic on the same endpoint
          readinessProbe:
            httpGet:
              path: /prometheus/metrics
              port: 8000
              scheme: HTTP
            initialDelaySeconds: 15
            timeoutSeconds: 3
            periodSeconds: 15

View File

@ -0,0 +1,65 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: myblog
namespace: test
spec:
replicas: 2 # 指定Pod副本数
selector: # 指定Pod的选择器
matchLabels:
app: myblog
template:
metadata:
labels: # 给Pod打label
app: myblog
spec:
containers:
- name: myblog
image: harbor.opsbase.cn/public/myblog
imagePullPolicy: IfNotPresent
env:
- name: MYSQL_HOST
valueFrom:
configMapKeyRef:
name: test-db-configmap
key: Mysql_Host
- name: MYSQL_PORT
valueFrom:
configMapKeyRef:
name: test-db-configmap
key: Mysql_PORT
- name: MYSQL_USER
valueFrom:
secretKeyRef:
name: test-db-secret
key: MYSQL_USER
- name: MYSQL_PASSWD
valueFrom:
secretKeyRef:
name: test-db-secret
key: MYSQL_PASSWD
ports:
- containerPort: 80
resources:
requests:
memory: 100Mi
cpu: 50m
limits:
memory: 500Mi
cpu: 100m
livenessProbe:
httpGet:
path: /blog/index/
port: 80
scheme: HTTP
initialDelaySeconds: 10 # 容器启动后第一次执行探测是需要等待多少秒
periodSeconds: 15 # 执行探测的频率
timeoutSeconds: 2 # 探测超时时间
readinessProbe:
httpGet:
path: /blog/index/
port: 80
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 2
periodSeconds: 15

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: dev-opsbase-ssh-svc
namespace: test
spec:
ports:
- port: 22
protocol: TCP
targetPort: 22
selector:
app: dev-opsbase-ssh
type: ClusterIP

View File

@ -0,0 +1,28 @@
apiVersion: apps/v1 # Api接口版本
kind: Deployment # 定义控制器
metadata:
name: dev-opsbase-ssh # 定义deployment名称
namespace: test
spec:
replicas: 1 # 副本数量,还需要指定副本标签与 Deployment控制器进行匹配
selector: # 指定Pod选择器
matchLabels: # 标签匹配方式
app: dev-opsbase-ssh # 匹配metadata.name名称
template: # pod容器
metadata: # 具体信息
labels: # 定义标签
app: dev-opsbase-ssh # pod名称
spec:
containers:
- name: dev-opsbase-ssh # 容器名称
image: lghost/sshd:v0.1 # 拉取镜像
imagePullPolicy: IfNotPresent # 镜像pull策略
ports:
- containerPort: 22
resources: # 限制资源
requests:
memory: 100Mi
cpu: 50m
limits:
memory: 500Mi
cpu: 100m

View File

@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
name: mysql-bos
namespace: base
spec:
ports:
- port: 3306
targetPort: 3306
protocol: TCP
name: mysql-bos
---
kind: Endpoints
apiVersion: v1
metadata:
name: mysql-bos
namespace: base
subsets:
- addresses:
- ip: 10.10.10.60
ports:
- name: mysql-bos
port: 3306
protocol: TCP

View File

@ -0,0 +1,4 @@
# endpoint
使用endpoint 将容器外地址改为数据库连接:
mysql-bos.base:3306

View File

@ -0,0 +1,41 @@
---
apiVersion: apps/v1 # for versions before 1.8.0 use apps/v1beta1
kind: Deployment
metadata:
name: nginx-deployment-basic
labels:
app: nginx
spec:
replicas: 2
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9 # replace it with your exactly <image_name:tags>
ports:
- containerPort: 80
---
apiVersion: autoscaling.alibabacloud.com/v1beta1
kind: CronHorizontalPodAutoscaler
metadata:
labels:
controller-tools.k8s.io: "1.0"
name: cronhpa-nginx-deployment-basic
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: nginx-deployment-basic
jobs:
- name: "scale-down"
schedule: "* * 23 * * *"
targetSize: 1
- name: "scale-up"
schedule: "* * 17 * * *"
targetSize: 2

View File

@ -0,0 +1,186 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
image: k8s.gcr.io/metrics-server/metrics-server:v0.4.4
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
periodSeconds: 10
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100

View File

@ -0,0 +1,17 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: bookstore-details
namespace: default
spec:
rules:
- host: bookstore.luffy.com
http:
paths:
- path: /details
pathType: Prefix
backend:
service:
name: details
port:
number: 9080

View File

@ -0,0 +1,24 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: bookstore
namespace: default
spec:
rules:
- host: bookstore.luffy.com
http:
paths:
- path: /reviews
pathType: Prefix
backend:
service:
name: reviews
port:
number: 9080
- path: /details
pathType: Prefix
backend:
service:
name: details
port:
number: 9080

View File

@ -0,0 +1,19 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: bookstore-reviews
namespace: default
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /reviews/$1
spec:
rules:
- host: bookstore.luffy.com
http:
paths:
- path: /api/reviews/(.*)
pathType: Prefix
backend:
service:
name: reviews
port:
number: 9080

View File

@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: details
labels:
app: details
spec:
replicas: 1
selector:
matchLabels:
app: details
template:
metadata:
labels:
app: details
spec:
containers:
- name: details
image: docker.io/istio/examples-bookinfo-details-v1:1.16.2
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9080

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: details
labels:
app: details
spec:
ports:
- port: 9080
name: http
selector:
app: details

View File

@ -0,0 +1,28 @@
## 多path转发示例
### 目标
myblog.pod.opsbase.cn(解析到 172.21.51.143),按 path 转发:

- `/foo/aaa` → service1:4200/foo/aaa
- `/bar` → service2:8080
- `/` → myblog:80/
### 命令
```bash
kubectl apply -f detail.dpl.yml
kubectl apply -f detail.svc.yml
kubectl apply -f reviews.dpl.yml
kubectl apply -f reviews.svc.yml
```
## URL重写
目标:
bookstore.luffy.com → 172.21.51.67 ↓
/api/reviews -> reviews service
/details -> details service

View File

@ -0,0 +1,25 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: reviews
labels:
app: reviews
spec:
replicas: 1
selector:
matchLabels:
app: reviews
template:
metadata:
labels:
app: reviews
spec:
containers:
- name: reviews
image: docker.io/istio/examples-bookinfo-reviews-v3:1.16.2
imagePullPolicy: IfNotPresent
env:
- name: LOG_DIR
value: "/tmp/logs"
ports:
- containerPort: 9080

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: reviews
labels:
app: reviews
spec:
ports:
- port: 9080
name: http
selector:
app: reviews

View File

@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: myblog
namespace: test
annotations: # 添加nginx参数
nginx.ingress.kubernetes.io/force-ssl-redirect: "false" # 同时支持http/https
nginx.ingress.kubernetes.io/proxy-body-size: 1000m
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.org/client-max-body-size: 1000m
spec:
rules:
- host: myblog.opsbase.cn
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: myblog
port:
number: 80
tls:
- hosts:
- myblog.opsbase.cn
secretName: tls-myblog

View File

@ -0,0 +1,17 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: cmdb-ing
namespace: test
spec:
rules:
- host: cmdb.pod.opsbase.cn
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: cmdb-svc
port:
number: 80

View File

@ -0,0 +1,23 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: myblog
namespace: test
spec:
rules:
- host: myblog.pod.opsbase.cn
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: myblog
port:
number: 80
tls:
- hosts:
- myblog.pod.opsbase.cn
secretName: tls-pod.opsbase.cn

View File

@ -0,0 +1,296 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-configuration
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tcp-services
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: udp-services
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: nginx-ingress-clusterrole
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
- "networking.k8s.io"
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
- "networking.k8s.io"
resources:
- ingresses/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: nginx-ingress-role
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
# Defaults to "<election-id>-<ingress-class>"
# Here: "<ingress-controller-leader>-<nginx>"
# This has to be adapted if you change either parameter
# when launching the nginx-ingress-controller.
- "ingress-controller-leader-nginx"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: nginx-ingress-role-nisa-binding
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress-role
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: nginx-ingress-clusterrole-nisa-binding
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-ingress-controller
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
spec:
hostNetwork: true #添加为host模式
# wait up to five minutes for the drain of connections
terminationGracePeriodSeconds: 300
serviceAccountName: nginx-ingress-serviceaccount
#serviceAccount: kube-dns
#serviceAccountName: kube-dns
nodeSelector:
ingress: "true"
containers:
- name: nginx-ingress-controller
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0
args:
- /nginx-ingress-controller
- --configmap=$(POD_NAMESPACE)/nginx-configuration
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --publish-service=$(POD_NAMESPACE)/ingress-nginx
- --annotations-prefix=nginx.ingress.kubernetes.io
securityContext:
allowPrivilegeEscalation: true
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
# www-data -> 101
runAsUser: 101
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
---
apiVersion: v1
kind: LimitRange
metadata:
name: ingress-nginx
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
spec:
limits:
- min:
memory: 90Mi
cpu: 100m
type: Container

View File

@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDMTCCAhmgAwIBAgIJAOEPd8tPOqd3MA0GCSqGSIb3DQEBCwUAMC8xFTATBgNV
BAMMDCoub3BzYmFzZS5jbjEWMBQGA1UECgwNaW5ncmVzcy1uZ2lueDAeFw0yMjAz
MjUxODE5MzlaFw0zMDAzMjMxODE5MzlaMC8xFTATBgNVBAMMDCoub3BzYmFzZS5j
bjEWMBQGA1UECgwNaW5ncmVzcy1uZ2lueDCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAKDY390pDMhPaO8U3Lxt4BUnA20A7nIcCTBCu7pHdOAXUua2Sfje
OMaKjx+SYA1T2I6fvS830vwiPLGZTIPK9YXS9tL/Zd8NWvxqfRJhKVbGjgTxi0Rv
ARhAmbnFwed+DQIs+oDZgrhrpIYZIEmD82RWEFPtYTcPs/Tf5Vn8QVE6dOUP8NpF
Ck84gp2W1qjnSMF84K6ESBYOvFi76feOGZ5OwGwS9U2DeLDp5ceKwBzwXiqRugMR
HyACgrBFFK+BSkq8duKdKej4SEwVO2gI4HHFXBwpLtpo2iAv5/dA/+nxo0xWeflj
FwTgxxzwtmfKilR9YZxpss2QWibnBFmn7MMCAwEAAaNQME4wHQYDVR0OBBYEFBMO
VbR8Y4g9wqgZQ6I4TVmEn7+hMB8GA1UdIwQYMBaAFBMOVbR8Y4g9wqgZQ6I4TVmE
n7+hMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJWTdX6WTsdi5ZnD
3eFqcfAaJAqyALgJaDh+DEL+Ke9pgRlJDF9//iO95JfDfuLu17Vv0pngQZfabP/f
L9ui308uEiEpwbpqFus+Q2k6uzYMzVk1RDT81fgPtMk1g13dCnzibhn3U0fS0k1o
AYsEqKk4RG0wvNWS+/XYGEAjE6mgw79lkeQpChWYRyA6H/nZBc+tsTx+lEf8Edsl
VW3iDpOl00rRoOtvBWqEGVV+RD5EcNcCFwCb0wPFKs2N24T7E0PqXrRzaX3beeo9
BdBfNjc0GOC4kmxkMtnHRnVxtKOzeNwa8u/aToo3dko8a2TtgdioBT3u5jPLY7Pz
AawEAaI=
-----END CERTIFICATE-----

View File

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCg2N/dKQzIT2jv
FNy8beAVJwNtAO5yHAkwQru6R3TgF1Lmtkn43jjGio8fkmANU9iOn70vN9L8Ijyx
mUyDyvWF0vbS/2XfDVr8an0SYSlWxo4E8YtEbwEYQJm5xcHnfg0CLPqA2YK4a6SG
GSBJg/NkVhBT7WE3D7P03+VZ/EFROnTlD/DaRQpPOIKdltao50jBfOCuhEgWDrxY
u+n3jhmeTsBsEvVNg3iw6eXHisAc8F4qkboDER8gAoKwRRSvgUpKvHbinSno+EhM
FTtoCOBxxVwcKS7aaNogL+f3QP/p8aNMVnn5YxcE4Mcc8LZnyopUfWGcabLNkFom
5wRZp+zDAgMBAAECggEAOscl/365whVqxjt8cotAYNpx4Qp/GEnwfadVTLxCFIXR
cKfankwuuTb3GFV4Lkaek3gCPVgMDMFCJrBbiqnHUREHy5EzG/CYeDc931KMNb63
NU1DVE2wO7mXs5B1zG9+t3XSUPWrVFNZuvtBljvW3KqqqtDLPsDJRUnwWRLal6DN
o2RM0wUmAEdnsXicCLN76QDSyDpgDbHzZe2lI3zgKIm1p15DUHh3HZLTlY4ER1IS
nq4TLXX9raHHVUY9pvwiiOhoRUsNHL/WScHpC5Co7K1lRWpok7egZtrtsbz6on/C
kSThCWnrTUcHeUdX5M0DHb7A5X3OdaRkNJlD7ya3CQKBgQDSCwgRjT4uDlS+Nr3V
v89IIxpk/tqNNrwEtrXr2VsVJeW5dpReo/2Tn4bD8zGE2+V8tLq10jCoVzbhjPZI
rdgDdi1F6Yk/CWDdFP00w0h0FDbJSXOcSoFuQcE5tLKqcr4pFEnwGKwHmQ+CrsO9
hrWpjAQk2IF+R39ZF1fP3YRZTQKBgQDECkXIaVff/X0jhGaMBNEEtR+1ePE/5qUy
EgP+NnbKu4p+W8g1ridk90jNcYTQWJDqXgF/W+Oa7d878OP6i69PhTO1rJTGDPUv
h7COCnbajwYigwM9263IbwujQORWLTyuJiO2E0QAteFxaHK380wZrLF6YFE32p77
bXXogtLWTwKBgCdhGBcqUvERkMPEwZyUhIkyw50RxunYzDFaDAt2ycPSQhqeZ7zC
pCUMMJkGPE97ZrAVtjeme6bkCw8IfZgst6YWfvBvk2K1IGryp3I/9pKEw6zDT6CK
u29vdomaHjEkqBBNlHsmNQKLqMPIfjxHSEHMVW6PBOAnCXIrlTQMhOa1AoGACLzQ
eWtAkGoy7qdTDsCUNFqCTEtUrV1xVXb6GJWC2+xZ8uHSXZoihVyEMSvzGHoqE1gX
Rv7oeRpLDdfhwWQCb0/nixxjESS4tOKeWuZaf8lKI7WlKlelOj7AzQjyZGjlwHlQ
tCfNqx5wKubV3h8I8EgDLQon9I5Y8aniTHewP/8CgYEApo2rh+JpeGWSQrYG8d3a
n4DtmGVQe5OI70s5K4k6nHgHe5kUtz7NT5Iog71gqQCNX9CjusS6X4M16prYr7oV
yu8mC7M8DhXDZHFe76grD7PVKT0MlTTb6VVxQk1KBWIzYCVP1fMd1wQNLSHpADsX
1MsxsQ1KU6Wdom0yCMzGSdg=
-----END PRIVATE KEY-----

View File

@ -0,0 +1,17 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: hz-jenkins
namespace: jenkins
spec:
rules:
- host: hz-jenkins.pod.opsbase.cn
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jenkins
port:
number: 8080

View File

@ -0,0 +1,118 @@
apiVersion: v1
kind: Namespace
metadata:
name: jenkins
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: jenkins
namespace: jenkins
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-storage
resources:
requests:
storage: 200Gi
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: jenkins
namespace: jenkins
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: jenkins-crb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: jenkins
namespace: jenkins
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: jenkins-master
namespace: jenkins
spec:
replicas: 1
selector:
matchLabels:
devops: jenkins-master
template:
metadata:
labels:
devops: jenkins-master
spec:
serviceAccount: jenkins #Pod 需要使用的服务账号
initContainers:
- name: fix-permissions
image: busybox
command: ["sh", "-c", "chown -R 1000:1000 /var/jenkins_home"]
securityContext:
privileged: true
volumeMounts:
- name: jenkinshome
mountPath: /var/jenkins_home
containers:
- name: jenkins
# image: jenkinsci/blueocean:1.25.2
image: jenkinsci/blueocean:latest
imagePullPolicy: IfNotPresent
ports:
- name: http #Jenkins Master Web 服务端口
containerPort: 8080
- name: slavelistener #Jenkins Master 供未来 Slave 连接的端口
containerPort: 50000
volumeMounts:
- name: jenkinshome
mountPath: /var/jenkins_home
env:
- name: JAVA_OPTS
value: "-Xms4096m -Xmx5120m -Duser.timezone=Asia/Shanghai -Dhudson.model.DirectoryBrowserSupport.CSP="
volumes:
- name: jenkinshome
persistentVolumeClaim:
claimName: jenkins
---
apiVersion: v1
kind: Service
metadata:
name: jenkins
namespace: jenkins
spec:
ports:
- name: http
port: 8080
targetPort: 8080
- name: slavelistener
port: 50000
targetPort: 50000
type: ClusterIP
selector:
devops: jenkins-master
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jenkins-web
namespace: jenkins
spec:
ingressClassName: nginx
rules:
- host: jenkins.pod.opsbase.cn
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jenkins
port:
number: 8080

View File

@ -0,0 +1,12 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: jenkins
namespace: jenkins
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-storage
resources:
requests:
storage: 200Gi

View File

@ -0,0 +1,133 @@
pipeline {
    // Choose whether the master or a node executes this job
    agent {label 'master'}
    // agent {label 'jnlp-slave'}
    parameters {
        // Git Parameter plugin arguments must be wrapped in a
        // gitParameter(...) call — a bare "key: value" list is not valid
        // declarative parameters syntax and fails pipeline parsing.
        gitParameter branchFilter: 'origin/(.*)',
                     defaultValue: 'master',
                     name: 'GIT_BRANCH',
                     quickFilterEnabled: false,
                     selectedValue: 'NONE',
                     sortMode: 'NONE',
                     tagFilter: '*',
                     type: 'PT_BRANCH_TAG'
    }
    options {
        buildDiscarder(logRotator(numToKeepStr: '10'))
        timeout(time: 10, unit: 'MINUTES') // abort the build after 10 minutes
    }
    environment {
        // PROJECT = "${JOB_BASE_NAME##*.}"
        PROJECT = "${JOB_BASE_NAME}"
        HARBOR_URL="harbor.opsbase.cn/public"
        // DingTalk webhook token kept in Jenkins credentials
        DINGTALK_CREDS = credentials('dingtalk')
    }
    stages {
        stage('printenv') {
            steps {
                echo '打印环境变量'
                sh 'printenv'
                script{
                    // Initialize env vars referenced by the post{} notifications
                    env.GIT_LOG = ""
                    env.BUILD_TASKS = ""
                    env.imageTag = "None"
                }
            }
        }
        stage('checkout') {
            steps {
                // Checkout statement generated via the pipeline-syntax helper
                checkout(
                    [
                        $class: 'GitSCM',
                        branches: [[name: '*/${GIT_BRANCH}']],
                        //branches: [[name: '*/develop']],
                        extensions: [
                            [$class: 'CheckoutOption', timeout: 120],
                            [$class: 'CloneOption', depth: 1, noTags: false, reference: '', shallow: true ,timeout: 60]],
                        userRemoteConfigs: [[credentialsId: 'gitee', url: '${GIT_URL}']]
                    ]
                )
                // checkout([$class: 'GitSCM', branches: [[name: '*/${GIT_BRANCH}']], extensions: [[$class: 'CloneOption', depth: 1, noTags: false, reference: '', shallow: true]], userRemoteConfigs: [[url: '${GIT_URL}']]])
                // updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success')
                script{
                    // Capture the latest commit message and short hash for notifications/tagging
                    sh "git log --oneline -n 1 > gitlog.file"
                    env.GIT_LOG = readFile("gitlog.file").trim()
                    env.imageTag = sh (script: 'git rev-parse --short HEAD ${GIT_COMMIT}', returnStdout: true).trim()
                    env.BUILD_TASKS = "\n" + env.STAGE_NAME
                }
                // updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success')
            }
        }
        stage('build-image') {
            steps {
                retry(2) { sh 'docker build . -t ${HARBOR_URL}/${PROJECT}:${imageTag}' }
            }
        }
        stage('push-image') {
            steps {
                retry(2) { sh 'docker push ${HARBOR_URL}/${PROJECT}:${imageTag}'}
                // updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success')
                script{
                    env.BUILD_TASKS += "\t=>\t" + env.STAGE_NAME
                }
            }
        }
        stage('deploy') {
            steps {
                // Substitute the freshly pushed image into the manifests, then apply
                sh "sed -i 's#{{IMAGE_URL}}#${HARBOR_URL}/${PROJECT}:${imageTag}#g' manifests/*"
                timeout(time: 1, unit: 'MINUTES') {
                    sh "kubectl apply -f manifests/"
                }
                // updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success')
                script{
                    env.BUILD_TASKS += "\t=>\t" + env.STAGE_NAME
                }
            }
        }
    }
    post {
        success {
            sh """
                curl -s 'https://oapi.dingtalk.com/robot/send?access_token=${DINGTALK_CREDS_PSW}' \
                    -H 'Content-Type: application/json' \
                    -d '{
                        "msgtype": "markdown",
                        "markdown": {
                            "title":"${JOB_BASE_NAME}",
                            "text": "任务构建成功 😄 \n**项目名称**: ${JOB_BASE_NAME}\n \n**构建分支**: ${GIT_BRANCH}\n \n**Git log**: ${GIT_LOG}\n \n**构建任务**:\n ${BUILD_TASKS}\n \n**构建地址**: ${RUN_DISPLAY_URL}"
                        }
                    }'
            """
        }
        failure {
            sh """
                curl -s 'https://oapi.dingtalk.com/robot/send?access_token=${DINGTALK_CREDS_PSW}' \
                    -H 'Content-Type: application/json' \
                    -d '{
                        "msgtype": "markdown",
                        "markdown": {
                            "title":"${JOB_BASE_NAME}",
                            "text": "任务构建失败 ❌ \n**项目名称**: ${JOB_BASE_NAME}\n \n**构建分支**: ${GIT_BRANCH}\n \n**Git log**: ${GIT_LOG}\n \n**构建任务**:\n ${BUILD_TASKS}\n \n**构建地址**: ${RUN_DISPLAY_URL}"
                        }
                    }'
            """
        }
        always {
            echo '执行完毕 !'
        }
    }
}

View File

@ -0,0 +1,40 @@
// Minimal CI pipeline: print env -> checkout -> build image -> DingTalk notify.
pipeline {
    // Run on the Jenkins master node (selected by agent label).
    agent {label 'master'}
    environment {
        PROJECT = 'myblog'
    }
    stages {
        stage('printenv') {
            steps {
                echo '打印环境变量'
                sh 'printenv'
            }
        }
        stage('Checkout') {
            steps {
                // Checkout statement generated with the Pipeline Syntax snippet generator.
                checkout([$class: 'GitSCM', branches: [[name: '*/${GIT_BRANCH}']], extensions: [], userRemoteConfigs: [[url: '${GIT_URL}']]])
            }
        }
        stage('Build-image') {
            steps {
                sh 'docker build . -t myblog:latest -f Dockerfile'
            }
        }
        stage('Send-dingtalk') {
            steps {
                // 添加钉钉机器人并将请求ip加入白名单
                // (register the DingTalk robot and whitelist the caller IP).
                // BUG FIX: this remark previously sat INSIDE the sh script as a
                // '//' line — '//' is not a shell comment, so the shell tried to
                // execute it and the step failed. Moved out as a Groovy comment.
                // NOTE(review): the access token is hard-coded; prefer a Jenkins
                // credential via credentials() as the other pipelines here do.
                sh """
                curl 'https://oapi.dingtalk.com/robot/send?access_token=b6d0c30412ad11a9c33debc5c2245ffe95abf234079a65a62134d531dd6befe4' \
                    -H 'Content-Type: application/json' \
                    -d '{"msgtype": "text",
                        "text": {
                            "content": "${JOB_BASE_NAME} 镜像构建成功!"
                        }
                    }'
                """
            }
        }
    }
}

View File

@ -0,0 +1,100 @@
// Declarative CI/CD pipeline: checkout -> build image -> push -> deploy to
// Kubernetes, reporting stage status to GitLab and the result to DingTalk.
pipeline {
    agent { label '172.21.51.68'}
    options {
        buildDiscarder(logRotator(numToKeepStr: '10'))  // keep only the last 10 builds
        disableConcurrentBuilds()                       // serialize runs of this job
        timeout(time: 20, unit: 'MINUTES')              // hard cap on total run time
        gitLabConnection('gitlab')                      // connection used by updateGitlabCommitStatus
    }
    environment {
        IMAGE_REPO = "172.21.51.143:5000/demo/myblog"
        // Exposes DINGTALK_CREDS_USR / DINGTALK_CREDS_PSW; _PSW holds the robot token.
        DINGTALK_CREDS = credentials('dingTalk')
        // Markdown indentation filler used when composing the notification text.
        TAB_STR = "\n \n&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"
    }
    stages {
        stage('printenv') {
            steps {
                script{
                    // Capture the latest commit subject for the notification message.
                    sh "git log --oneline -n 1 > gitlog.file"
                    env.GIT_LOG = readFile("gitlog.file").trim()
                }
                sh 'printenv'
            }
        }
        stage('checkout') {
            steps {
                // 'checkout scm' uses the job's configured SCM definition.
                checkout scm
                updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success')
                script{
                    // BUILD_TASKS accumulates the names of completed stages.
                    env.BUILD_TASKS = env.STAGE_NAME + "√..." + env.TAB_STR
                }
            }
        }
        stage('build-image') {
            steps {
                // Image is tagged with the full commit SHA provided by the git plugin.
                retry(2) { sh 'docker build . -t ${IMAGE_REPO}:${GIT_COMMIT}'}
                updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success')
                script{
                    env.BUILD_TASKS += env.STAGE_NAME + "√..." + env.TAB_STR
                }
            }
        }
        stage('push-image') {
            steps {
                retry(2) { sh 'docker push ${IMAGE_REPO}:${GIT_COMMIT}'}
                updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success')
                script{
                    env.BUILD_TASKS += env.STAGE_NAME + "√..." + env.TAB_STR
                }
            }
        }
        stage('deploy') {
            steps {
                // Substitute the {{IMAGE_URL}} placeholder in the manifests with the
                // freshly pushed image reference, then apply everything.
                sh "sed -i 's#{{IMAGE_URL}}#${IMAGE_REPO}:${GIT_COMMIT}#g' manifests/*"
                timeout(time: 1, unit: 'MINUTES') {
                    sh "kubectl apply -f manifests/"
                }
                updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success')
                script{
                    env.BUILD_TASKS += env.STAGE_NAME + "√..." + env.TAB_STR
                }
            }
        }
    }
    post {
        // NOTE(review): ${DINGTALK_CREDS_PSW} is interpolated by Groovy into the
        // shell command line, which can leak the secret into logs/process lists;
        // Jenkins recommends single-quoted sh with the env var expanded by the
        // shell instead — confirm before changing.
        // NOTE(review): BRANCH_NAME is only set by multibranch pipelines — verify
        // this job type, otherwise the message renders "null".
        success {
            echo 'Congratulations!'
            sh """
            curl 'https://oapi.dingtalk.com/robot/send?access_token=${DINGTALK_CREDS_PSW}' \
                -H 'Content-Type: application/json' \
                -d '{
                    "msgtype": "markdown",
                    "markdown": {
                        "title":"myblog",
                        "text": "😄👍 构建成功 👍😄 \n**项目名称**luffy \n**Git log**: ${GIT_LOG} \n**构建分支**: ${BRANCH_NAME} \n**构建地址**${RUN_DISPLAY_URL} \n**构建任务**${BUILD_TASKS}"
                    }
                }'
            """
        }
        failure {
            echo 'Oh no!'
            sh """
            curl 'https://oapi.dingtalk.com/robot/send?access_token=${DINGTALK_CREDS_PSW}' \
                -H 'Content-Type: application/json' \
                -d '{
                    "msgtype": "markdown",
                    "markdown": {
                        "title":"myblog",
                        "text": "😖❌ 构建失败 ❌😖 \n**项目名称**luffy \n**Git log**: ${GIT_LOG} \n**构建分支**: ${BRANCH_NAME} \n**构建地址**${RUN_DISPLAY_URL} \n**构建任务**${BUILD_TASKS}"
                    }
                }'
            """
        }
        always {
            echo 'I will always say Hello again!'
        }
    }
}

View File

@ -0,0 +1,21 @@
# Bind the "admin" ServiceAccount in kubernetes-dashboard to the built-in
# cluster-admin ClusterRole, so its token can log in to the Dashboard with
# full privileges.
# NOTE(review): cluster-admin grants unrestricted cluster-wide access —
# acceptable for a lab, dangerous in production.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"  # let the RBAC controller reconcile this binding
roleRef:
  kind: ClusterRole
  name: cluster-admin  # built-in super-user role
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: admin
    namespace: kubernetes-dashboard
---
# The ServiceAccount the binding above refers to.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: kubernetes-dashboard

View File

@ -0,0 +1,303 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
type: NodePort
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.2.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.6
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}

View File

@ -0,0 +1,174 @@
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
name: ks-installer
namespace: kubesphere-system
labels:
version: v3.2.1
spec:
persistence:
storageClass: "" # If there is no default StorageClass in your cluster, you need to specify an existing StorageClass here.
authentication:
jwtSecret: "" # Keep the jwtSecret consistent with the Host Cluster. Retrieve the jwtSecret by executing "kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret" on the Host Cluster.
local_registry: "" # Add your private registry address if it is needed.
# dev_tag: "" # Add your kubesphere image tag you want to install, by default it's same as ks-install release version.
etcd:
monitoring: false # Enable or disable etcd monitoring dashboard installation. You have to create a Secret for etcd before you enable it.
endpointIps: localhost # etcd cluster EndpointIps. It can be a bunch of IPs here.
port: 2379 # etcd port.
tlsEnable: true
common:
core:
console:
enableMultiLogin: true # Enable or disable simultaneous logins. It allows different users to log in with the same account at the same time.
port: 30880
type: NodePort
# apiserver: # Enlarge the apiserver and controller manager's resource requests and limits for the large cluster
# resources: {}
# controllerManager:
# resources: {}
redis:
enabled: false
volumeSize: 2Gi # Redis PVC size.
openldap:
enabled: false
volumeSize: 2Gi # openldap PVC size.
minio:
volumeSize: 20Gi # Minio PVC size.
monitoring:
# type: external # Whether to specify the external prometheus stack, and need to modify the endpoint at the next line.
endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 # Prometheus endpoint to get metrics data.
GPUMonitoring: # Enable or disable the GPU-related metrics. If you enable this switch but have no GPU resources, Kubesphere will set it to zero.
enabled: false
gpu: # Install GPUKinds. The default GPU kind is nvidia.com/gpu. Other GPU kinds can be added here according to your needs.
kinds:
- resourceName: "nvidia.com/gpu"
resourceType: "GPU"
default: true
es: # Storage backend for logging, events and auditing.
# master:
# volumeSize: 4Gi # The volume size of Elasticsearch master nodes.
# replicas: 1 # The total number of master nodes. Even numbers are not allowed.
# resources: {}
# data:
# volumeSize: 20Gi # The volume size of Elasticsearch data nodes.
# replicas: 1 # The total number of data nodes.
# resources: {}
logMaxAge: 7 # Log retention time in built-in Elasticsearch. It is 7 days by default.
elkPrefix: logstash # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log.
basicAuth:
enabled: false
username: ""
password: ""
externalElasticsearchUrl: ""
externalElasticsearchPort: ""
alerting: # (CPU: 0.1 Core, Memory: 100 MiB) It enables users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from.
enabled: false # Enable or disable the KubeSphere Alerting System.
# thanosruler:
# replicas: 1
# resources: {}
auditing: # Provide a security-relevant chronological set of recordsrecording the sequence of activities happening on the platform, initiated by different tenants.
enabled: false # Enable or disable the KubeSphere Auditing Log System.
# operator:
# resources: {}
# webhook:
# resources: {}
devops: # (CPU: 0.47 Core, Memory: 8.6 G) Provide an out-of-the-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
enabled: false # Enable or disable the KubeSphere DevOps System.
# resources: {}
jenkinsMemoryLim: 2Gi # Jenkins memory limit.
jenkinsMemoryReq: 1500Mi # Jenkins memory request.
jenkinsVolumeSize: 8Gi # Jenkins volume size.
jenkinsJavaOpts_Xms: 512m # The following three fields are JVM parameters.
jenkinsJavaOpts_Xmx: 512m
jenkinsJavaOpts_MaxRAM: 2g
events: # Provide a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
enabled: false # Enable or disable the KubeSphere Events System.
# operator:
# resources: {}
# exporter:
# resources: {}
# ruler:
# enabled: true
# replicas: 2
# resources: {}
logging: # (CPU: 57 m, Memory: 2.76 G) Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
enabled: false # Enable or disable the KubeSphere Logging System.
containerruntime: docker
logsidecar:
enabled: true
replicas: 2
# resources: {}
metrics_server: # (CPU: 56 m, Memory: 44.35 MiB) It enables HPA (Horizontal Pod Autoscaler).
enabled: false # Enable or disable metrics-server.
monitoring:
storageClass: "" # If there is an independent StorageClass you need for Prometheus, you can specify it here. The default StorageClass is used by default.
# kube_rbac_proxy:
# resources: {}
# kube_state_metrics:
# resources: {}
# prometheus:
# replicas: 1 # Prometheus replicas are responsible for monitoring different segments of data source and providing high availability.
# volumeSize: 20Gi # Prometheus PVC size.
# resources: {}
# operator:
# resources: {}
# adapter:
# resources: {}
# node_exporter:
# resources: {}
# alertmanager:
# replicas: 1 # AlertManager Replicas.
# resources: {}
# notification_manager:
# resources: {}
# operator:
# resources: {}
# proxy:
# resources: {}
gpu: # GPU monitoring-related plug-in installation.
nvidia_dcgm_exporter: # Ensure that gpu resources on your hosts can be used normally, otherwise this plug-in will not work properly.
enabled: false # Check whether the labels on the GPU hosts contain "nvidia.com/gpu.present=true" to ensure that the DCGM pod is scheduled to these nodes.
# resources: {}
multicluster:
clusterRole: none # host | member | none # You can install a solo cluster, or specify it as the Host or Member Cluster.
network:
networkpolicy: # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods).
# Make sure that the CNI network plugin used by the cluster supports NetworkPolicy. There are a number of CNI network plugins that support NetworkPolicy, including Calico, Cilium, Kube-router, Romana and Weave Net.
enabled: false # Enable or disable network policies.
ippool: # Use Pod IP Pools to manage the Pod network address space. Pods to be created can be assigned IP addresses from a Pod IP Pool.
type: none # Specify "calico" for this field if Calico is used as your CNI plugin. "none" means that Pod IP Pools are disabled.
topology: # Use Service Topology to view Service-to-Service communication based on Weave Scope.
type: none # Specify "weave-scope" for this field to enable Service Topology. "none" means that Service Topology is disabled.
openpitrix: # An App Store that is accessible to all platform tenants. You can use it to manage apps across their entire lifecycle.
store:
enabled: false # Enable or disable the KubeSphere App Store.
servicemesh: # (0.3 Core, 300 MiB) Provide fine-grained traffic management, observability and tracing, and visualized traffic topology.
enabled: false # Base component (pilot). Enable or disable KubeSphere Service Mesh (Istio-based).
kubeedge: # Add edge nodes to your cluster and deploy workloads on edge nodes.
enabled: false # Enable or disable KubeEdge.
cloudCore:
nodeSelector: {"node-role.kubernetes.io/worker": ""}
tolerations: []
cloudhubPort: "10000"
cloudhubQuicPort: "10001"
cloudhubHttpsPort: "10002"
cloudstreamPort: "10003"
tunnelPort: "10004"
cloudHub:
advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided.
- "" # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided.
nodeLimit: "100"
service:
cloudhubNodePort: "30000"
cloudhubQuicNodePort: "30001"
cloudhubHttpsNodePort: "30002"
cloudstreamNodePort: "30003"
tunnelNodePort: "30004"
edgeWatcher:
nodeSelector: {"node-role.kubernetes.io/worker": ""}
tolerations: []
edgeWatcherAgent:
nodeSelector: {"node-role.kubernetes.io/worker": ""}
tolerations: []

View File

@ -0,0 +1,307 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterconfigurations.installer.kubesphere.io
spec:
group: installer.kubesphere.io
versions:
- name: v1alpha1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
x-kubernetes-preserve-unknown-fields: true
status:
type: object
x-kubernetes-preserve-unknown-fields: true
scope: Namespaced
names:
plural: clusterconfigurations
singular: clusterconfiguration
kind: ClusterConfiguration
shortNames:
- cc
---
apiVersion: v1
kind: Namespace
metadata:
name: kubesphere-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ks-installer
namespace: kubesphere-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ks-installer
rules:
- apiGroups:
- ""
resources:
- '*'
verbs:
- '*'
- apiGroups:
- apps
resources:
- '*'
verbs:
- '*'
- apiGroups:
- extensions
resources:
- '*'
verbs:
- '*'
- apiGroups:
- batch
resources:
- '*'
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- apiregistration.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- tenant.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- certificates.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- devops.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- '*'
verbs:
- '*'
- apiGroups:
- logging.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- jaegertracing.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- storage.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- admissionregistration.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- policy
resources:
- '*'
verbs:
- '*'
- apiGroups:
- autoscaling
resources:
- '*'
verbs:
- '*'
- apiGroups:
- networking.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- config.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- iam.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- notification.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- auditing.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- events.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- core.kubefed.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- installer.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- storage.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- security.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- monitoring.kiali.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- kiali.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- networking.k8s.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- kubeedge.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- types.kubefed.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- monitoring.kubesphere.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- application.kubesphere.io
resources:
- '*'
verbs:
- '*'
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ks-installer
subjects:
- kind: ServiceAccount
name: ks-installer
namespace: kubesphere-system
roleRef:
kind: ClusterRole
name: ks-installer
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: ks-installer
namespace: kubesphere-system
labels:
app: ks-install
spec:
replicas: 1
selector:
matchLabels:
app: ks-install
template:
metadata:
labels:
app: ks-install
spec:
serviceAccountName: ks-installer
containers:
- name: installer
image: kubesphere/ks-installer:v3.2.1
imagePullPolicy: "Always"
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: 20m
memory: 100Mi
volumeMounts:
- mountPath: /etc/localtime
name: host-time
readOnly: true
volumes:
- hostPath:
path: /etc/localtime
type: ""
name: host-time

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: ks-install-svc
namespace: test
spec:
ports:
- port: 80
protocol: TCP
targetPort: 8000
selector:
app: ks-install
type: ClusterIP

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: LimitRange
metadata:
name: cpu-limit-range
namespace: test
spec:
limits:
- default:
cpu: 200m
defaultRequest:
cpu: 100m
type: Container

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: LimitRange
metadata:
name: mem-limit-range
namespace: test
spec:
limits:
- default:
memory: 1024Mi
defaultRequest:
memory: 512Mi
type: Container

View File

@ -0,0 +1,39 @@
# Two-container Pod: the cmdb app plus a MySQL sidecar. Containers in one Pod
# share the network namespace, so cmdb reaches MySQL via 127.0.0.1:3306.
apiVersion: v1
kind: Pod
metadata:
  name: cmdb
  namespace: test
  labels:
    component: cmdb
spec:
  volumes:
    - name: mysql-data
      hostPath:
        path: /opt/mysql/data  # node-local storage; data stays on whichever node runs the Pod
  nodeSelector: # schedule the Pod onto nodes carrying the label component=mysql
    component: mysql
  containers:
    - name: cmdb
      image: harbor.opsbase.cn/public/cmdb:latest
      env:
        - name: MYSQL_HOST # MySQL host; loopback works because the sidecar shares the Pod network
          value: "127.0.0.1"
        - name: MYSQL_PASSWD # NOTE(review): plaintext credential — prefer a Secret reference
          value: "123456"
      ports:
        - containerPort: 8000
    - name: mysql
      image: mysql:5.7
      args:
        - --character-set-server=utf8mb4
        - --collation-server=utf8mb4_unicode_ci
      ports:
        - containerPort: 3306
      env:
        - name: MYSQL_ROOT_PASSWORD # root password for the MySQL instance
          value: "123456"
        - name: MYSQL_DATABASE # database created on first start
          value: "cmdb"
      volumeMounts:
        - name: mysql-data
          mountPath: /var/lib/mysql

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
name: sshd-demo
namespace: test
spec:
nodeSelector: # 使用节点选择器将Pod调度到指定label的节点
standalone: "true"
containers:
- name: sshd-demo
image: lghost/sshd:v0.1
ports:
- containerPort: 22

View File

@ -0,0 +1,40 @@
apiVersion: v1
kind: Pod
metadata:
name: pod-lifecycle
namespace: test
labels:
component: pod-lifecycless
spec:
initContainers:
- name: init
image: busybox
command: ['sh', '-c', 'echo $(date +%s): INIT >> /loap/timing']
volumeMounts:
- mountPath: /loap
name: timing
containers:
- name: main
image: busybox
command: ['sh', '-c', 'echo $(date +%s): START >> /loap/timing;
sleep 10; echo $(date +%s): END >> /loap/timing;']
volumeMounts:
- mountPath: /loap
name: timing
livenessProbe:
exec:
command: ['sh', '-c', 'echo $(date +%s): LIVENESS >> /loap/timing']
readinessProbe:
exec:
command: ['sh', '-c', 'echo $(date +%s): READINESS >> /loap/timing']
lifecycle:
postStart:
exec:
command: ['sh', '-c', 'echo $(date +%s): POST-START >> /loap/timing']
preStop:
exec:
command: ['sh', '-c', 'echo $(date +%s): PRE-STOP >> /loap/timing']
volumes:
- name: timing
hostPath:
path: /tmp/loap

View File

@ -0,0 +1,30 @@
# Grant full access to the "demo" namespace (namespace-scoped admin).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: pre-admin
  namespace: demo
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: demo
  name: pods-reader-writer
rules:
  - apiGroups: [""] # "" indicates the core API group
    resources: ["*"]
    verbs: ["*"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: pods-reader-writer
  namespace: demo
subjects:
  - kind: ServiceAccount # subject may be User, Group, or ServiceAccount
    # BUG FIX: the binding referenced "demo-pods-admin", which is never created
    # anywhere in this file; RBAC bindings to non-existent subjects are accepted
    # by the API server but grant nothing. It must match the ServiceAccount
    # declared above ("pre-admin").
    name: pre-admin
    namespace: demo
roleRef:
  kind: Role # Role or ClusterRole; via a RoleBinding the grant stays namespace-scoped
  name: pods-reader-writer
  apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,22 @@
apiVersion: v1
kind: LimitRange
metadata:
name: mem-limit-range
namespace: demo
spec:
limits:
- default:
memory: 512Mi
defaultRequest:
memory: 256Mi
type: Container
---
apiVersion: v1
kind: Pod
metadata:
name: default-mem-demo
namespace: demo
spec:
containers:
- name: default-mem-demo
image: nginx:alpine

View File

@ -0,0 +1,11 @@
# Opaque Secret carrying the cmdb database credentials.
apiVersion: v1
kind: Secret
metadata:
  name: mysql-cmdb
  namespace: test
type: Opaque
data:
  DB_USER: cm9vdA==      # base64 of "root"
  DB_PASSWORD: MTIzNDU2  # base64 of "123456"
# Encode with "echo -n" so no trailing newline is included: echo -n 123456 | base64

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: nacos
type: Opaque
data:
nacosPwd: elhJd2FhVGk0N1Bq
# 注意加-n参数echo -n 123456|base64

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: cmdb-svc
namespace: test
spec:
ports:
- port: 80
protocol: TCP
targetPort: 8000
selector:
app: cmdb
type: ClusterIP

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: myblog
namespace: test
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: myblog
type: ClusterIP

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-pv
namespace: test
spec:
capacity:
storage: 50Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
path: /opt/data/nfs
server: 66.94.121.23

View File

@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nfs-pvc
namespace: test
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Gi

View File

@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata: # metadata是该资源的元数据name是必须的元数据项
name: nginx-nfs-test
namespace: test # 指定命名空间
spec: # spec部分是该Deployment的规则说明
replicas: 2
selector:
matchLabels:
app: nginx
template: # template定义Pod的模板这是配置的重要部分
metadata: # 定义Pod的元数据至少要顶一个labellabel的key和value可以任意指定
labels:
app: nginx
spec: # spec描述的是Pod的规则此部分定义pod中每一个容器的属性name和image是必需的
containers:
- name: nginx
image: nginx:alpine # 镜像openresty/openresty or nginx:alpine
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
volumeMounts: # 挂载容器中的目录到pvc nfs中的目录
- name: www
mountPath: /usr/share/nginx/html
volumes:
- name: www
persistentVolumeClaim: # 指定pvc资源
claimName: nfs-pvc

View File

@ -0,0 +1,55 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment-pvc
spec:
replicas: 2
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
volumeMounts:
- name: wwwroot
mountPath: /usr/share/nginx/html
ports:
- containerPort: 80
volumes:
- name: wwwroot
persistentVolumeClaim:
claimName: nginx-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nginx-pvc
# annotations: # 注释使用默认
# volume.beta.kubernetes.io/storage-class: "nfs-storage"
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 5Gi
---
apiVersion: v1
kind: Service
metadata:
name: nginx-svc
spec:
selector:
app: nginx
ports:
- name: http80
port: 80
protocol: TCP
targetPort: 80
type: ClusterIP

View File

@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: StatefulSet # 每个pod申请一个独立的pvc资源
metadata:
name: nginx-nfs-deployment
labels:
app: nginx-nfs-deployment
spec:
replicas: 1
serviceName: nginx-svc
template:
metadata:
name: nginx-nfs-deployment
labels:
app: nginx-nfs-deployment
spec:
containers:
- name: nginx-nfs-deployment
image: bitnami/nginx:1.23-debian-11
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
volumeMounts:
- mountPath: /usr/share/nginx/html/
name: nginxvolume
restartPolicy: Always
volumeClaimTemplates:
- metadata:
name: nginxvolume
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-storage"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
selector:
matchLabels:
app: nginx-nfs-deployment
---
apiVersion: v1
kind: Service
metadata:
name: nginx-svc
spec:
selector:
app: nginx-nfs-deployment
ports:
- port: 80

View File

@ -0,0 +1,114 @@
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false" # 是否设置为默认sc
name: nfs-storage
provisioner: nfs-provisioner
volumeBindingMode: Immediate
reclaimPolicy: Delete
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: nfs-client-provisioner
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0 # quay.io/external_storage/nfs-client-provisioner:latest
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-provisioner # 和Storage中provisioner保持一致便可
- name: NFS_SERVER
value: 66.94.121.23 # nfs服务器地址
- name: NFS_PATH
value: /opt/data/nfs # 共享存储目录
volumes:
- name: nfs-client-root
nfs:
server: 66.94.121.23
path: /opt/data/nfs
--- # rbac授权
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
namespace: default
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
namespace: default
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: default
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,13 @@
kind: PersistentVolumeClaim # 测试pvc申请
apiVersion: v1
metadata:
name: test-nfs-pvc
annotations:
volume.beta.kubernetes.io/storage-class: "nfs-storage"
spec:
storageClassName: "nfs-storage"
accessModes:
- ReadWriteMany # 多读写
resources:
requests:
storage: 1Gi

View File

@ -0,0 +1,42 @@
apiVersion: v1
kind: Pod
metadata:
name: cmdb
namespace: test
labels:
component: cmdb
spec:
containers:
- name: cmdb
image: harbor.opsbase.cn/public/cmdb:latest
imagePullPolicy: IfNotPresent
env:
- name: DB_HOST # 指定root用户的用户名
value: "66.94.125.73"
- name: DB_PASSWORD
value: "123456"
ports:
- containerPort: 8000
resources:
requests:
memory: 512Mi
cpu: 50m
limits:
memory: 2000Mi
cpu: 100m
livenessProbe:
httpGet:
path: /prometheus/metrics
port: 8000
scheme: HTTP
initialDelaySeconds: 60 # 容器启动后第一次执行探测是需要等待多少秒
periodSeconds: 15 # 执行探测的频率
timeoutSeconds: 2 # 探测超时时间
readinessProbe:
httpGet:
path: /prometheus/metrics
port: 8000
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 2
periodSeconds: 15

View File

@ -0,0 +1,42 @@
apiVersion: v1
kind: Pod
metadata:
name: myblog
namespace: test
labels:
component: myblog
spec:
containers:
- name: myblog
image: harbor.opsbase.cn/public/myblog:v1
imagePullPolicy: IfNotPresent
env:
- name: MYSQL_HOST # 指定root用户的用户名
value: "66.94.125.73"
- name: MYSQL_PASSWD
value: "123456"
ports:
- containerPort: 8002
resources:
requests:
memory: 100Mi
cpu: 50m
limits:
memory: 500Mi
cpu: 100m
livenessProbe:
httpGet:
path: /blog/index/
port: 8002
scheme: HTTP
initialDelaySeconds: 10 # 容器启动后第一次执行探测是需要等待多少秒
periodSeconds: 15 # 执行探测的频率
timeoutSeconds: 2 # 探测超时时间
readinessProbe:
httpGet:
path: /blog/index/
port: 8002
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 2
periodSeconds: 15

View File

@ -0,0 +1,48 @@
apiVersion: v1
kind: Pod
metadata:
name: mysql
namespace: test
labels:
component: mysql
spec:
hostNetwork: true # 声明pod的网络模式为host模式效果同docker run --net=host
volumes:
- name: mysql-data
hostPath:
path: /opt/mysql/data
nodeSelector: # 使用节点选择器将Pod调度到指定label的节点
component: mysql
containers:
- name: mysql
image: mysql:5.7
args:
- --character-set-server=utf8mb4
- --collation-server=utf8mb4_unicode_ci
ports:
- containerPort: 3306
env:
- name: MYSQL_ROOT_PASSWORD
value: "opsbase.cn"
- name: MYSQL_DATABASE
value: "cmdb"
resources:
requests:
memory: 2000Mi
cpu: 800m
limits:
memory: 4Gi
cpu: 2
readinessProbe:
tcpSocket:
port: 3306
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket:
port: 3306
initialDelaySeconds: 15
periodSeconds: 20
volumeMounts:
- name: mysql-data
mountPath: /var/lib/mysql

View File

Some files were not shown because too many files have changed in this diff Show More