commit 8a204467faece5ea2fce68e4be865d2f74169399 Author: {admin@attacker.club} Date: Sun Jul 21 01:29:01 2024 +0800 init diff --git a/1.docs/1.1 docker.md b/1.docs/1.1 docker.md new file mode 100644 index 0000000..d866905 --- /dev/null +++ b/1.docs/1.1 docker.md @@ -0,0 +1,73 @@ + + +# docker 部署 + +部署 + +- 在线 shell 安装 + +```bash +curl -sSL https://get.docker.com/ | sh +``` + +- yum + +```bash +## 清理老的版本 +sudo yum remove docker \ + docker-client \ + docker-client-latest \ + docker-common \ + docker-latest \ + docker-latest-logrotate \ + docker-logrotate \ + docker-engine + +## yum在线安装 +sudo yum-config-manager \ + --add-repo \ + https://download.docker.com/linux/centos/docker-ce.repo + # 添加Docker软件包源 + +sudo yum install docker-ce docker-ce-cli containerd.io + # 安装Docker CE + +systemctl start docker && systemctl enable docker +# 启动服务 +``` + +## docker 配置 + +```bash + +cat > /etc/docker/daemon.json << EOF +{ + "exec-opts": ["native.cgroupdriver=systemd"], + "graph": "/var/lib", + "log-driver": "json-file", + "log-opts": { + "max-size": "100m" + }, + "registry-mirrors": [ + "https://1lcdq5an.mirror.aliyuncs.com", + "https://mirror.ccs.tencentyun.com", + "http://hub-mirror.c.163.com" + ] +} +EOF + + + +# log日志保存大小设置为100M +# "graph": "/data/docker";指定docker默认数据路径 +# "exec-opts": ["native.cgroupdriver=systemd"], +## 调整docker Cgroup Driver为systemd和日志格式设定 + +systemctl restart docker # 重启docker + +``` diff --git a/1.docs/1.2 docker-compose.md b/1.docs/1.2 docker-compose.md new file mode 100644 index 0000000..81a7aa3 --- /dev/null +++ b/1.docs/1.2 docker-compose.md @@ -0,0 +1,19 @@ + + +# 二进制 docker-compose + +下载地址: https://github.com/docker/compose/releases + +```bash +# wget -O /usr/local/sbin/docker-compose https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m) + +curl -L https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/sbin/docker-compose + +chmod +x 
/usr/local/sbin/docker-compose +docker-compose version # 查看docker-compose版本 +``` diff --git a/1.docs/2.1 harbor.md b/1.docs/2.1 harbor.md new file mode 100644 index 0000000..2d81d18 --- /dev/null +++ b/1.docs/2.1 harbor.md @@ -0,0 +1,49 @@ + + +# harbor 镜像仓库 + +```bash +wget -c https://github.com/goharbor/harbor/releases/download/v2.3.4/harbor-offline-installer-v2.3.4.tgz + +tar zxvf harbor-offline-installer*.tgz +cd harbor +cp harbor.yml.tmpl harbor.yml +grep hostname harbor.yml +# 修改 hostname地址,http,https配置 +./install.sh # 执行安装脚本 + +## compose启动 +docker-compose down +docker-compose up -d + + +## 登录私有仓库 dockerhub +docker login +## 登录私有仓库 +docker login harbor.opsbase.cn + +``` + +## 更新证书 +```bash +# 如果证书 +cd harbor/ +docker cp harbor.enterx.cc.key nginx:/etc/cert/server.key +docker cp harbor.enterx.cc_bundle.pem nginx:/etc/cert/server.crt +docker-compose down && docker-compose up -d +``` + +## tag 使用 + +```bash +docker tag myblog:v1 harbor.opsbase.cn/public/myblog:v1 +docker push harbor.opsbase.cn/public/myblog:v1 +# 打tag 推送到harbor +docker pull harbor.opsbase.cn/public/myblog:v1 +# 拉取镜像到本地 +``` diff --git a/1.docs/kubernets-install.sh b/1.docs/kubernets-install.sh new file mode 100644 index 0000000..eecc70c --- /dev/null +++ b/1.docs/kubernets-install.sh @@ -0,0 +1,348 @@ +#!/bin/bash + +# bash kubernets-install.sh master +# bash kubernets-install.sh node + +# 指定 Kubernetes 版本; 参考https://github.com/kubernetes/kubernetes/releases +custom_version="1.27.4" + +read -p "是否修改Kubernetes版本? 
当前为: $custom_version (y/n) " custom_version + +# 根据用户选择设置kubernetes_version变量 +if [ "$custom_version" == "y" ]; then + read -p "请输入自定义的Kubernetes版本: " custom_kubernetes_version + kubernetes_version="$custom_kubernetes_version" +fi + +# 输出选择的Kubernetes版本 +echo "选择的Kubernetes版本是: $kubernetes_version" + +# 判断传递的参数,如果没有传递或传递的是错误参数,则默认安装master节点 +node_type=${1:-"master"} + +# 脚本用途说明 +cat </dev/null 2>&1; then + echo "已检测到已安装的 Kubernetes。" + read -p "是否卸载已存在的 Kubernetes?(y/n): " uninstall_choice + if [[ $uninstall_choice = "y" || $uninstall_choice = "Y" ]]; then + uninstall_kubernetes + else + echo "已取消安装。" + exit 0 + fi + fi +} +# 卸载 Kubernetes +uninstall_kubernetes() { + echo "正在卸载 Kubernetes..." + + case $os in + ubuntu) + uninstall_kubernetes_ubuntu + ;; + centos) + uninstall_kubernetes_centos + ;; + amazon_linux) + uninstall_kubernetes_centos + ;; + *) + echo "不支持的操作系统。" + exit 1 + ;; + esac + + echo "Kubernetes 已成功卸载。" +} + +# 获取操作系统信息 +get_os_info() { + if [ -f /etc/os-release ]; then + . /etc/os-release + if [[ $ID = "ubuntu" ]]; then + os="ubuntu" + elif [[ $ID = "centos" ]]; then + os="centos" + elif [[ $ID = "amzn" ]]; then + os="amazon_linux" + fi + elif [ -f /etc/redhat-release ]; then + if grep -q "CentOS Linux release 7" /etc/redhat-release; then + os="centos" + fi + fi +} + +# 卸载 Kubernetes(Ubuntu) +uninstall_kubernetes_ubuntu() { + echo "正在卸载 Kubernetes..." + if command -v kubeadm &>/dev/null; then + kubeadm reset -f + else + echo "kubeadm 未找到,无法执行重置操作。请手动重置 Kubernetes。" + fi + if command -v kubectl &>/dev/null; then + kubectl delete -f $flannel + kubectl delete -f $calico + apt remove -y kubeadm kubelet kubectl containerd + rm -rf /etc/kubernetes /var/lib/etcd /var/lib/kubelet + else + echo "kubectl 未找到,无法执行删除操作。请手动删除相关资源。" + fi +} + +# 卸载 Kubernetes(CentOS) +uninstall_kubernetes_centos() { + echo "正在卸载 Kubernetes..." 
+ if command -v kubectl &>/dev/null; then + kubectl delete -f $flannel + kubectl delete -f $calico + yum --debuglevel=1 remove -y kubeadm kubelet kubectl containerd bash-completion + yum autoremove -y + rm -rf /etc/kubernetes /var/lib/etcd /var/lib/kubelet + + else + echo "kubectl 未找到,无法执行删除操作。请手动删除相关资源。" + fi + +} + +# 关闭并禁用防火墙(Ubuntu、CentOS) +disable_firewall() { + echo "正在关闭并禁用防火墙..." + if [[ $os = "ubuntu" ]]; then + ufw disable + elif [[ $os = "centos" || $os = "amazon_linux" ]]; then + systemctl stop firewalld + systemctl disable firewalld + # 清空iptables策略 + iptables -F + iptables -X + iptables -Z + iptables -F -t nat + iptables -X -t nat + iptables -Z -t nat + iptables -P INPUT ACCEPT + if [ -s /etc/selinux/config ]; then + setenforce 0 + sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config + fi + fi +} + +# 关闭并禁用 Swap +disable_swap() { + echo "正在关闭并禁用 Swap..." + swapoff -a + sed -i '/swap/d' /etc/fstab +} + +# 优化内核参数 +optimize_kernel() { + echo "正在优化内核参数..." + sysctl_file="/etc/sysctl.d/kubernetes.conf" + # echo "net.bridge.bridge-nf-call-ip6tables = 1" >$sysctl_file + # echo "net.bridge.bridge-nf-call-iptables = 1" >>$sysctl_file + echo "net.ipv4.ip_forward=1" >>$sysctl_file + echo "vm.max_map_count=262144" >>$sysctl_file + sysctl -p $sysctl_file +} + +# 禁用透明大页 +disable_transparent_hugepage() { + echo "禁用透明大页..." 
+ thp_file="/etc/systemd/system/disable-thp.service" + echo "[Unit]" >$thp_file + echo "Description=Disable Transparent Huge Pages (THP)" >>$thp_file + echo "DefaultDependencies=no" >>$thp_file + echo "After=local-fs.target" >>$thp_file + echo "Before=apparmor.service" >>$thp_file + echo "" >>$thp_file + echo "[Service]" >>$thp_file + echo "Type=oneshot" >>$thp_file + echo "ExecStart=/bin/sh -c 'echo never > /sys/kernel/mm/transparent_hugepage/enabled && echo never > /sys/kernel/mm/transparent_hugepage/defrag'" >>$thp_file + echo "" >>$thp_file + echo "[Install]" >>$thp_file + echo "WantedBy=multi-user.target" >>$thp_file + chmod 664 $thp_file + systemctl daemon-reload + systemctl enable disable-thp + systemctl start disable-thp +} + +# 安装 kubeadm、kubelet 和 kubectl +install_kubernetes() { + echo "正在安装 kubeadm、kubelet 和 kubectl(版本:$kubernetes_version)..." + + if [[ $os = "ubuntu" ]]; then + apt update + apt install -y apt-transport-https ca-certificates curl bridge-utils + modprobe br_netfilter # 加载所需的内核模块 + curl -fsSL $apt_repository/doc/apt-key.gpg | apt-key add - + echo "deb $apt_repository kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list + + apt update + apt install -y kubeadm=$kubernetes_version-00 kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 + elif [[ $os = "centos" || $os = "amazon_linux" ]]; then + cat </etc/yum.repos.d/kubernetes.repo +[kubernetes] +name=Kubernetes +baseurl=${yum_repository}/yum/repos/kubernetes-el7-x86_64/ +enabled=1 +gpgcheck=0 +repo_gpgcheck=0 +gpgkey=${yum_repository}/yum/doc/yum-key.gpg +${yum_repository}/yum/doc/rpm-package-key.gpg +EOF + + yum --debuglevel=1 install -y kubeadm-$kubernetes_version kubelet-$kubernetes_version kubectl-$kubernetes_version + systemctl enable kubelet + + echo "添加bash-completion 自动补全" + yum install bash-completion -y + source /usr/share/bash-completion/bash_completion + source <(kubectl completion bash) + echo "source <(kubectl completion bash)" >>~/.bashrc + fi +} + +# 
安装 Containerd +install_containerd() { + echo "正在安装 Containerd..." + if [[ $os = "centos" || $os = "amazon_linux" ]]; then + + yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo + yum --debuglevel=1 install -y containerd + elif [[ $os = "ubuntu" ]]; then + apt install -y containerd + fi + mkdir -p /etc/containerd + # 生成默认配置 + containerd config default >/etc/containerd/config.toml + # 配置 systemd cgroup 驱动程序 + sed -i 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml + sed -i "s#registry.k8s.io#${docker_image_repository}#" /etc/containerd/config.toml + systemctl restart containerd + systemctl enable containerd +} + +# 执行 kubeadm init 并复制 kubeconfig 文件 +initialize_kubernetes_cluster() { + if command -v kubeadm &>/dev/null; then + kubeadm reset -f + else + echo "kubeadm 未找到,无法执行重置操作。请手动重置 Kubernetes。" + exit 1 + fi + + echo "正在执行 kubeadm init..." + kubeadm init --kubernetes-version=v${kubernetes_version} \ + --image-repository=${docker_image_repository} \ + --service-cidr=10.96.0.0/16 \ + --pod-network-cidr=10.244.0.0/16 \ + -v=5 + + # --kubernetes-version 指定要安装的Kubernetes版本 + # --image-repository=registry.k8s.io 容器镜像仓库默认地址 + # --service-cidr Kubernetes Service的IP地址范围 + # --pod-network-cidr Kubernetes Pod的IP地址范围 + # --control-plane-endpoint=test-k8s-lb.opsbase.cn:6443 控制平面终结点地址,用于在高可用集群中指定负载均衡器的地址。 + echo "已成功执行 kubeadm init。" + # ctr 查看镜像list + ctr image ls + echo "正在复制 kubeconfig 文件..." + mkdir -p $HOME/.kube + \cp /etc/kubernetes/admin.conf $HOME/.kube/config + chown $(id -u):$(id -g) $HOME/.kube/config + echo "kubeconfig 文件已复制到 $HOME/.kube/config。" +} + +# 安装网络组件(Flannel) +install_network_plugin_flannel() { + echo "正在安装 Flannel 网络组件..." + echo $flannel + kubectl apply -f $flannel +} + +# 安装网络组件(Calico) +install_network_plugin_calico() { + echo "正在安装 Calico 网络组件..." 
+ kubectl create -f $calico +} + +# 主函数 +main() { + select_country + get_os_info + check_root_user + check_kubernetes_installed + disable_firewall + disable_swap + disable_transparent_hugepage + install_kubernetes + install_containerd + optimize_kernel + if [[ "$node_type" = "master" ]]; then + initialize_kubernetes_cluster + install_network_plugin_flannel + # 如果想使用 Calico 网络组件,注释掉上面的 "flannel" 函数,然后取消"calico" 行的注释 + # install_network_plugin_calico + else + echo "slave节点,跳过集群初始化操作。" + fi + +} + +# 主函数 +main diff --git a/2.docker/confluence.dockerfile b/2.docker/confluence.dockerfile new file mode 100644 index 0000000..d0cbb1b --- /dev/null +++ b/2.docker/confluence.dockerfile @@ -0,0 +1,9 @@ +FROM cptactionhank/atlassian-confluence:7.4.0 + +USER root + +# 将代理破解包加入容器 +COPY "atlassian-agent-v1.2.3/atlassian-agent.jar" /opt/atlassian/confluence/ + +# 设置启动加载代理包 +RUN echo 'export CATALINA_OPTS="-javaagent:/opt/atlassian/confluence/atlassian-agent.jar ${CATALINA_OPTS}"' >> /opt/atlassian/confluence/bin/setenv.sh \ No newline at end of file diff --git a/2.docker/confluence.sh b/2.docker/confluence.sh new file mode 100644 index 0000000..29f0bac --- /dev/null +++ b/2.docker/confluence.sh @@ -0,0 +1,20 @@ +docker run -d --name confluence +### +# @Author: Logan.Li +# @Gitee: https://gitee.com/attacker +# @email: admin@attacker.club +# @Date: 2022-12-10 22:27:24 +# @LastEditTime: 2023-09-28 13:34:20 +# @Description: +### +--restart always \ + -p 8090:8090 \ + -e TZ="Asia/Shanghai" \ + -v /home/confluence:/var/atlassian/confluence \ + confluence:v1 + +docker cp mysql-connector-java-5.1.48-bin.jar confluence:/opt/atlassian/confluence/lib +# cp数据库驱动 + +docker exec -it confluence java -jar /opt/atlassian/confluence/atlassian-agent.jar -p conf -m pp@pangshare.com -n pangshare -o https://www.pangshare.com -s B37H-XJIY-BCSR-FZQQ +# diff --git a/2.docker/django_python.dockerfile b/2.docker/django_python.dockerfile new file mode 100644 index 0000000..7e22d12 --- /dev/null +++ 
b/2.docker/django_python.dockerfile @@ -0,0 +1,17 @@ +FROM python:3.8-alpine + +WORKDIR /home +COPY . /home + +RUN pip install -i http://mirrors.aliyun.com/pypi/simple --trusted-host mirrors.aliyun.com -r requirements.txt +RUN rm -rf /home/env + +# RUN python manage.py makemigrations && python manage.py migrate +# CMD [ "python", "./manage.py", "runserver", "0.0.0.0:8000"] +RUN chmod +x run.sh +EXPOSE 8000 +CMD ["/bin/sh","run.sh"] +# 容器启动时默认执行的命令 + +# docker build -t lghost/bind9:latest . # build images +# docker push lghost/bind9:latest # 推送到dockerhub diff --git a/2.docker/docker-install b/2.docker/docker-install new file mode 100644 index 0000000..fa908c6 --- /dev/null +++ b/2.docker/docker-install @@ -0,0 +1,102 @@ +#!/bin/bash +# + +# 定义日志文件路径 +LOG_FILE="/var/log/docker_install.log" + +# 定义日志记录函数 +function logger() { + local log_level="$1" + local message="$2" + local color_code="" + + case "$log_level" in + "error" | "red") + color_code="\e[1;31m" + ;; + "warning" | "yellow") + color_code="\e[1;33m" + ;; + "success" | "green") + color_code="\e[1;32m" + ;; + "info" | "blue") + color_code="\e[1;34m" + ;; + esac + + echo -e "${color_code}${message}\e[0m" + echo "$message" >> "$LOG_FILE" +} + +# 判断是否 root 用户 +if [ $(id -u) -ne 0 ]; then + logger "error" "########## 错误:此脚本必须以 root 身份运行! 
##########" + exit 1 +fi + +# 环境检查 +if which getenforce && [ $(getenforce) == "Enforcing" ]; then + logger "info" "信息:关闭 SELINUX" + setenforce 0 + sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config +fi + +# 定义安装函数 +function install_docker() { + local install_method="$1" + + if [ "$install_method" == "local" ]; then + if [ -f "docker-24.0.6.tar.gz" ]; then + logger "info" "信息:发现本地压缩包,进行解压安装" + tar zxvf docker-24.0.6.tar.gz + mv docker/docker.service /etc/systemd/system/docker.service + cp docker/* /usr/bin + else + logger "warning" "警告:未找到本地压缩包,无法进行本地安装" + return 1 + fi + elif [ "$install_method" == "curl" ]; then + logger "info" "信息:尝试通过 curl 安装 Docker" + curl -fsSL https://get.docker.com | sh + elif [ "$install_method" == "yum" ]; then + logger "warning" "警告:尝试通过 Yum 安装 Docker" + sudo yum remove docker \ + docker-client \ + docker-client-latest \ + docker-common \ + docker-latest \ + docker-latest-logrotate \ + docker-logrotate \ + docker-engine -y + + sudo yum-config-manager \ + --add-repo \ + https://download.docker.com/linux/centos/docker-ce.repo + + sudo yum install docker-ce -y + else + logger "error" "错误:无效的安装方法" + return 1 + fi + + systemctl daemon-reload + systemctl enable docker + systemctl start docker + logger "success" "成功:安装 Docker 并启动服务($install_method 方式)" +} + +# 尝试本地安装 +if install_docker "local"; then + exit 0 +fi + +# 尝试 curl 安装 +if install_docker "curl"; then + exit 0 +fi + +# 尝试 yum 安装 +install_docker "yum" + +logger "info" "======================= 安装完成 =======================" \ No newline at end of file diff --git a/2.docker/docker-tools.sh b/2.docker/docker-tools.sh new file mode 100644 index 0000000..d21beb8 --- /dev/null +++ b/2.docker/docker-tools.sh @@ -0,0 +1,103 @@ +#!/bin/bash + +# Docker 镜像与容器操作脚本 +# 导出 Docker 镜像至指定路径 +export_image() { + # 参数检查 + if [ -z "$1" ] || [ -z "$2" ]; then + echo "用法: 导出镜像 <镜像名称> <导出路径>" + return 1 + fi + + local image_name="$1" + local export_path="$2" + + echo "正在导出镜像 '$image_name' 至 
'$export_path'..." + docker save -o "$export_path" "$image_name" + if [ $? -eq 0 ]; then + echo "镜像导出成功。" + else + echo "镜像导出失败。" + return 1 + fi +} + +# 将运行中的Docker容器导出为新的镜像 +export_container_as_image() { + # 参数检查 + if [ -z "$1" ] || [ -z "$2" ]; then + echo "用法: 导出容器为镜像 <容器名称或ID> <新镜像名称>" + return 1 + fi + + local container_name_or_id="$1" + local new_image_name="$2" + + echo "正在将运行中的容器 '$container_name_or_id' 导出为镜像 '$new_image_name'..." + docker commit "$container_name_or_id" "$new_image_name" + if [ $? -eq 0 ]; then + echo "容器已成功导出为镜像。" + else + echo "容器导出为镜像失败。" + return 1 + fi +} + +# 导入 Docker 镜像文件 +import_image() { + # 参数检查 + if [ -z "$1" ]; then + echo "用法: 导入镜像 <导入文件路径>" + return 1 + fi + + local import_path="$1" + + if [ ! -f "$import_path" ]; then + echo "文件 '$import_path' 不存在。" + return 1 + fi + + echo "正在从 '$import_path' 导入 Docker 镜像..." + docker load -i "$import_path" + if [ $? -eq 0 ]; then + echo "镜像导入成功。" + else + echo "镜像导入失败。" + return 1 + fi +} + +# 打印帮助信息 +print_help() { + echo -e "\n操作指南:\n" + echo "+---------------------------+---------------------------------------------------------+" + echo "| 功能 | 命令格式 |" + echo "+---------------------------+---------------------------------------------------------+" + echo "| export_image | ./docker-tools.sh 导出镜像 <镜像名称> <导出路径> |" + echo "| export_container | ./docker-tools.sh 导出容器为镜像 <容器名或ID> <新镜像名>|" + echo "| import_image | ./docker-tools.sh 导入镜像 <导入文件路径> |" + echo "+---------------------------+---------------------------------------------------------+" + echo "| 注: | 使用前请确保已赋予权限,如 'chmod +x docker-tools.sh' |" + echo "+---------------------------+---------------------------------------------------------+" +} + + +# 主程序:解析命令行参数,执行对应功能或打印帮助 +case "$1" in + export_image) + shift + export_image "$@" + ;; + import_image) + shift + import_image "$@" + ;; + export_container_as_image) + shift + export_container_as_image "$@" + ;; + *) + print_help + ;; +esac diff --git a/2.docker/etl.sh b/2.docker/etl.sh new file mode 
100644 index 0000000..939787a --- /dev/null +++ b/2.docker/etl.sh @@ -0,0 +1,57 @@ +#!/bin/bash +### +# @Author: Logan.Li +# @Gitee: https://gitee.com/attacker +# @email: admin@attacker.club +# @Date: 2023-10-19 01:52:35 + # @LastEditTime: 2023-10-19 10:11:16 +# @Description: +### + +DOLPHINSCHEDULER_VERSION=3.2.0 +# Initialize the database, make sure database already exists +docker run -d --name dolphinscheduler-tools \ + -e DATABASE="postgresql" \ + -e SPRING_DATASOURCE_URL="jdbc:postgresql://localhost:5432/" \ + -e SPRING_DATASOURCE_USERNAME=root \ + -e SPRING_DATASOURCE_PASSWORD="Wu20@250" \ + -e SPRING_JACKSON_TIME_ZONE="UTC" \ + --net host \ + apache/dolphinscheduler-tools:"${DOLPHINSCHEDULER_VERSION}" tools/bin/upgrade-schema.sh +# Starting DolphinScheduler service +docker run -d --name dolphinscheduler-master \ + -e DATABASE="postgresql" \ + -e SPRING_DATASOURCE_URL="jdbc:postgresql://localhost:5432/dolphinscheduler" \ + -e SPRING_DATASOURCE_USERNAME=root \ + -e SPRING_DATASOURCE_PASSWORD="Wu20@250" \ + -e SPRING_JACKSON_TIME_ZONE="UTC" \ + -e REGISTRY_ZOOKEEPER_CONNECT_STRING="localhost:2181" \ + --net host \ + -d apache/dolphinscheduler-master:"${DOLPHINSCHEDULER_VERSION}" +docker run -d --name dolphinscheduler-worker \ + -e DATABASE="postgresql" \ + -e SPRING_DATASOURCE_URL="jdbc:postgresql://localhost:5432/dolphinscheduler" \ + -e SPRING_DATASOURCE_USERNAME=root \ + -e SPRING_DATASOURCE_PASSWORD="Wu20@250" \ + -e SPRING_JACKSON_TIME_ZONE="UTC" \ + -e REGISTRY_ZOOKEEPER_CONNECT_STRING="localhost:2181" \ + --net host \ + -d apache/dolphinscheduler-worker:"${DOLPHINSCHEDULER_VERSION}" +docker run -d --name dolphinscheduler-api \ + -e DATABASE="postgresql" \ + -e SPRING_DATASOURCE_URL="jdbc:postgresql://localhost:5432/dolphinscheduler" \ + -e SPRING_DATASOURCE_USERNAME=root \ + -e SPRING_DATASOURCE_PASSWORD="Wu20@250" \ + -e SPRING_JACKSON_TIME_ZONE="UTC" \ + -e REGISTRY_ZOOKEEPER_CONNECT_STRING="localhost:2181" \ + --net host \ + -d 
apache/dolphinscheduler-api:"${DOLPHINSCHEDULER_VERSION}" +docker run -d --name dolphinscheduler-alert-server \ + -e DATABASE="postgresql" \ + -e SPRING_DATASOURCE_URL="jdbc:postgresql://localhost:5432/dolphinscheduler" \ + -e SPRING_DATASOURCE_USERNAME=root \ + -e SPRING_DATASOURCE_PASSWORD="Wu20@250" \ + -e SPRING_JACKSON_TIME_ZONE="UTC" \ + -e REGISTRY_ZOOKEEPER_CONNECT_STRING="localhost:2181" \ + --net host \ + -d apache/dolphinscheduler-alert-server:"${DOLPHINSCHEDULER_VERSION}" diff --git a/2.docker/frp.sh b/2.docker/frp.sh new file mode 100644 index 0000000..0ddb345 --- /dev/null +++ b/2.docker/frp.sh @@ -0,0 +1,6 @@ + +docker run --restart=always --network host \ +-d -v /etc/frps.ini:/etc/frp/frps.ini --name frps lghost/frps:0.48 + +docker run --restart=always --network host \ +-d -v /etc/frpc.ini:/etc/frp/frpc.ini --name frpc lghost/frpc:0.48 \ No newline at end of file diff --git a/2.docker/frpc.dockerfile b/2.docker/frpc.dockerfile new file mode 100644 index 0000000..50d9f27 --- /dev/null +++ b/2.docker/frpc.dockerfile @@ -0,0 +1,15 @@ +FROM alpine:latest + +ENV FRP_VERSION 0.48.0 + +RUN mkdir -p /etc/frp \ + && wget --no-check-certificate -c https://github.com/fatedier/frp/releases/download/v${FRP_VERSION}/frp_${FRP_VERSION}_linux_amd64.tar.gz \ + && tar zxvf frp_${FRP_VERSION}_linux_amd64.tar.gz \ + && cp frp_${FRP_VERSION}_linux_amd64/frpc /usr/bin/ \ + && cp frp_${FRP_VERSION}_linux_amd64/frpc.ini /etc/frp \ + && rm -rf frp_* + +ENTRYPOINT /usr/bin/frpc -c /etc/frp/frpc.ini + +# docker build -t lghost/frpc . 
+# docker run --restart=always --network host -d -v /etc/frpc.ini:/etc/frp/frpc.ini --name frpc lghost/frpc:0.48 \ No newline at end of file diff --git a/2.docker/frpc.ini b/2.docker/frpc.ini new file mode 100644 index 0000000..d51cda2 --- /dev/null +++ b/2.docker/frpc.ini @@ -0,0 +1,38 @@ +# frpc.ini (FRPC configuration) +[common] +server_addr = 21.136.xxx.xxx +server_port = 7000 +token = ************ + +[open] +type = http +local_ip = 192.168.0.254 +local_port = 80 +remote_port = 80 +custom_domains = open.opsbase.cn + +[test] +type = http +local_ip = 192.168.0.254 +local_port = 80 +remote_port = 80 +custom_domains = test.opsbase.cn + +[tcp4430] +type = tcp +local_ip = 10.10.10.209 +local_port = 4430 +remote_port = 4430 + + +[ssh] +type = tcp +local_ip = 192.168.0.254 +local_port = 22 +remote_port = 60022 + +[RDP] +type = tcp +local_ip = 192.168.0.234 +local_port = 3389 +remote_port = 63389 \ No newline at end of file diff --git a/2.docker/frps.dockerfile b/2.docker/frps.dockerfile new file mode 100644 index 0000000..f5c757a --- /dev/null +++ b/2.docker/frps.dockerfile @@ -0,0 +1,15 @@ +FROM alpine:latest + +ENV FRP_VERSION 0.48.0 + +RUN mkdir -p /etc/frp \ + && wget --no-check-certificate -c https://github.com/fatedier/frp/releases/download/v${FRP_VERSION}/frp_${FRP_VERSION}_linux_amd64.tar.gz \ + && tar zxvf frp_${FRP_VERSION}_linux_amd64.tar.gz \ + && cp frp_${FRP_VERSION}_linux_amd64/frps /usr/bin/ \ + && cp frp_${FRP_VERSION}_linux_amd64/frps.ini /etc/frp \ + && rm -rf frp_* + +ENTRYPOINT /usr/bin/frps -c /etc/frp/frps.ini + +# docker build -t lghost/frps -f frps.dockerfile . 
+# docker run --restart=always --network host -d -v /etc/frps.ini:/etc/frp/frps.ini --name frps lghost/frps:0.48 \ No newline at end of file diff --git a/2.docker/frps.ini b/2.docker/frps.ini new file mode 100644 index 0000000..f034bc4 --- /dev/null +++ b/2.docker/frps.ini @@ -0,0 +1,12 @@ +# frps.ini (FRPS configuration) +[common] +bind_addr=0.0.0.0 +bind_port = 7000 +token=************ +dashboard_port = 7500 +dashboard_user = root +dashboard_pwd = password1 +vhost_http_port = 80 +vhost_https_port = 443 +tcp_mux = true +privilege_mode = true \ No newline at end of file diff --git a/2.docker/gitlab.sh b/2.docker/gitlab.sh new file mode 100644 index 0000000..c64cf73 --- /dev/null +++ b/2.docker/gitlab.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +docker stop gitlab +docker rm gitlab +docker run -d \ + -p 8443:443 -p 8800:80 -p 2222:22 \ + --name gitlab \ + --restart always \ + --privileged=true \ + --volume /data/gitlab/config:/etc/gitlab \ + --volume /data/gitlab/logs:/var/log/gitlab \ + --volume /data/gitlab/data:/var/opt/gitlab \ + gitlab/gitlab-ce + # 指定版本: gitlab/gitlab-ce:12.3.5-ce.0 + +# --privileged=true 让容器获取宿主机root权限 +# /etc/gitlab/gitlab.rb # external_url地址更新 +# gitlab-ctl reconfigure # 载入配置 +# docker exec -it gitlab cat /etc/gitlab/initial_root_password #查看密码 \ No newline at end of file diff --git a/2.docker/gogs.sh b/2.docker/gogs.sh new file mode 100644 index 0000000..4e333ad --- /dev/null +++ b/2.docker/gogs.sh @@ -0,0 +1,15 @@ +#!/bin/bash +### +# @Author: Logan.Li +# @Gitee: https://gitee.com/attacker +# @email: admin@attacker.club +# @Date: 2023-09-29 23:32:45 +# @LastEditTime: 2023-09-29 23:38:15 +# @Description: +### + +docker run -d \ + --name=gogs \ + --restart=always \ + -p 3022:22 -p 3000:3000 \ + -v /data/docker/gogs:/data gogs/gogs diff --git a/2.docker/holmes_api_tomcat.dockerfile b/2.docker/holmes_api_tomcat.dockerfile new file mode 100644 index 0000000..1af99b8 --- /dev/null +++ b/2.docker/holmes_api_tomcat.dockerfile @@ -0,0 +1,18 @@ +FROM 
tomcat:8.5.24-jre8 +MAINTAINER Logan "admin@attacker.club" +# 大数据数据质量 后端api服务 + +ENV TZ=Asia/Shanghai +ENV TOMCAT_HOME=/usr/local/tomcat +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +##RUN rm $TOMCAT_HOME/bin/catalina.sh +COPY doc/catalina.sh $TOMCAT_HOME/bin/catalina.sh + +##RUN rm $TOMCAT_HOME/conf/server.xml +COPY doc/server.xml $TOMCAT_HOME/conf/server.xml + +RUN rm -rf $TOMCAT_HOME/webapps/* +COPY holmes-web/target/holmes-web-1.0-SNAPSHOT.war $TOMCAT_HOME/webapps/holmes.war +RUN chmod +x /usr/local/tomcat/bin/catalina.sh +EXPOSE 8080 \ No newline at end of file diff --git a/2.docker/holmes_ui_npm.dockerfile b/2.docker/holmes_ui_npm.dockerfile new file mode 100644 index 0000000..b115cb9 --- /dev/null +++ b/2.docker/holmes_ui_npm.dockerfile @@ -0,0 +1,17 @@ +FROM nginx:1.11.10-alpine +MAINTAINER Logan "admin@attacker.club" +# 前端静态 + +ENV TZ=Asia/Shanghai +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone +RUN mkdir -p /home/jollychic/www + +COPY dist /home/jollychic/www/dist +# npm编译后的静态文件 放nginx镜像里面 + +COPY config/nginx.conf /etc/nginx/conf.d/default.conf +COPY init.sh /home/jollychic/init.sh + +WORKDIR /home/jollychic +EXPOSE 80 +CMD chmod +x init.sh && ./init.sh \ No newline at end of file diff --git a/2.docker/jenkins.sh b/2.docker/jenkins.sh new file mode 100644 index 0000000..2cf6878 --- /dev/null +++ b/2.docker/jenkins.sh @@ -0,0 +1,16 @@ +#!/bin/bash +### + # @Author: admin@attacker.club + # @Date: 2022-12-10 22:27:24 + # @LastEditTime: 2023-02-15 15:42:49 + # @Description: +### + +docker run --name jenkins \ +--restart=always -d \ +-p 8080:8080 \ +-v /home/jenkins/data:/var/jenkins_home \ +jenkins/jenkins:lts + +docker exec -it jenkins cat /var/jenkins_home/secrets/initialAdminPassword +# 查看解锁密钥 \ No newline at end of file diff --git a/2.docker/jumpserver.sh b/2.docker/jumpserver.sh new file mode 100644 index 0000000..9de9d3c --- /dev/null +++ b/2.docker/jumpserver.sh @@ -0,0 +1,42 @@ 
+#!/bin/bash +### + # @Author: admin@attacker.club + # @Date: 2022-12-10 22:27:24 + # @LastEditTime: 2023-02-28 20:48:41 + # @Description: +### + +if [ ! "$SECRET_KEY" ]; then + SECRET_KEY=`cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 50`; + echo "SECRET_KEY=$SECRET_KEY" >> ~/.bashrc; + echo $SECRET_KEY; +else + echo $SECRET_KEY; +fi +if [ ! "$BOOTSTRAP_TOKEN" ]; then + BOOTSTRAP_TOKEN=`cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 16`; + echo "BOOTSTRAP_TOKEN=$BOOTSTRAP_TOKEN" >> ~/.bashrc; + echo $BOOTSTRAP_TOKEN; +else + echo $BOOTSTRAP_TOKEN; +fi + + + + +docker run --name jms_all -d \ + --restart=always \ + -v /opt/jumpserver:/opt/jumpserver/data/media \ + -p 80:80 \ + -p 2222:2222 \ + -e SECRET_KEY=$SECRET_KEY \ + -e BOOTSTRAP_TOKEN=$BOOTSTRAP_TOKEN \ + -e DB_HOST=192.168.xx.xx \ + -e DB_PORT=3306 \ + -e DB_USER=jumpserver \ + -e DB_PASSWORD='xxxxx' \ + -e DB_NAME=jumpserver \ + -e REDIS_HOST=192.168.xx.xx \ + -e REDIS_PORT=6379 \ + -e REDIS_PASSWORD=123456 \ + jumpserver/jms_all \ No newline at end of file diff --git a/2.docker/kuboard.sh b/2.docker/kuboard.sh new file mode 100644 index 0000000..63e56fe --- /dev/null +++ b/2.docker/kuboard.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# 获取当前 IP 地址 +current_ip=$(hostname -I | awk '{print $1}') + +sudo docker run -d \ + --restart=unless-stopped \ + --name=kuboard \ + -p 82:80/tcp \ + -p 10081:10081/tcp \ + -e KUBOARD_ENDPOINT="http://$current_ip:82" \ + -e KUBOARD_AGENT_SERVER_TCP_PORT="10081" \ + -v /data/kuboard-data:/data \ + eipwork/kuboard:v3 \ No newline at end of file diff --git a/2.docker/mysql5.7.sh b/2.docker/mysql5.7.sh new file mode 100644 index 0000000..342dcbf --- /dev/null +++ b/2.docker/mysql5.7.sh @@ -0,0 +1,15 @@ +#!/bin/bash +### + # @Author: admin@attacker.club + # @Date: 2022-09-14 21:38:54 + # @LastEditTime: 2023-03-31 01:37:18 + # @Description: +### + +docker run -p 3306:3306 --name mysql \ +--restart always \ +-v /usr/local/docker/mysql/conf:/etc/mysql \ +-v 
/usr/local/docker/mysql/logs:/var/log/mysql \ +-v /usr/local/docker/mysql/data:/var/lib/mysql \ +-e MYSQL_ROOT_PASSWORD=123456 \ +-d mysql:5.7 \ No newline at end of file diff --git a/2.docker/mysql8.sh b/2.docker/mysql8.sh new file mode 100644 index 0000000..1aab66a --- /dev/null +++ b/2.docker/mysql8.sh @@ -0,0 +1,21 @@ +#!/bin/bash +### +# @Author: admin@attacker.club +# @Date: 2022-09-14 21:38:54 +# @LastEditTime: 2024-06-30 23:18:01 +# @Description: +### + +dataDir="/opt/docker-data/mysql" +yum remove mariadb* -y # 卸载mariadb +rpm -ivh https://repo.mysql.com/mysql80-community-release-el7.rpm +yum install mysql-community-client -y # 安装mysql客户端 + +password=$(cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 12) +docker run \ + --restart always \ + -d -p 3306:3306 --name mysql \ + -v $dataDir:/var/lib/mysql \ + -e MYSQL_ROOT_PASSWORD=$password \ + mysql:8 +echo "passwod: $password" >mysql.txt diff --git a/2.docker/openldap.sh b/2.docker/openldap.sh new file mode 100644 index 0000000..803e1d0 --- /dev/null +++ b/2.docker/openldap.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +## openldap服务 +password=`cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 12` +docker run \ + -d -p 389:389 -p 636:636 \ + --name ldap-service \ + --restart=always \ + --hostname openldap \ + -v /opt/docker-data/slapd/database:/var/lib/ldap \ + --volume /opt/docker-data/slapd/config:/etc/ldap/slapd.d \ + --env LDAP_ORGANISATION="Opsbase" \ + --env LDAP_DOMAIN="opsbase.cn" \ + --env LDAP_BASE_DN="dc=opsbase,dc=cn" \ + --env LDAP_ADMIN_PASSWORD=$password \ + osixia/openldap:latest + +echo "passwod: $password" > openldap.txt +# 默认管理员账号 DN:admin.opsbase.cn diff --git a/2.docker/openwrt.sh b/2.docker/openwrt.sh new file mode 100644 index 0000000..6fa7332 --- /dev/null +++ b/2.docker/openwrt.sh @@ -0,0 +1,25 @@ + +### + # @Author: Logan.Li + # @Gitee: https://gitee.com/attacker + # @email: admin@attacker.club + # @Date: 2023-07-02 00:37:17 + # @LastEditTime: 2023-07-02 00:37:28 + # @Description: + # 
https://supes.top/docker%E7%89%88openwrt%E6%97%81%E8%B7%AF%E7%94%B1%E5%AE%89%E8%A3%85%E8%AE%BE%E7%BD%AE%E6%95%99%E7%A8%8B/ +### +# 打开网卡混杂模式 +ip link set ens33 promisc on +# 创建网络 +docker network create -d macvlan \ +--subnet=192.168.0.0/24 \ +--gateway=192.168.0.11 -o parent=ens33 openwrt-net +docker network ls && docker network inspect openwrt-net + + +# 下载 xxx-rootfs.tar.gz, https://supes.top/?target=x86%2F64&id=generic +# 加载镜像,创建并启动容器 +docker run -it --rm --privileged --name openwrt --network openwrt-net openwrt:latest /sbin/init +docker run -it --rm --name openwrt --network openwrt-net --ip 192.168.0.2 openwrt:latest bash + + diff --git a/2.docker/php.sh b/2.docker/php.sh new file mode 100644 index 0000000..e69de29 diff --git a/2.docker/phpldapadmin.sh b/2.docker/phpldapadmin.sh new file mode 100644 index 0000000..93cee7b --- /dev/null +++ b/2.docker/phpldapadmin.sh @@ -0,0 +1,16 @@ +## https://35.xx.xx.xx:6443/ +### + # @Author: admin@attacker.club + # @Date: 2023-02-22 19:14:48 + # @LastEditTime: 2023-02-22 19:16:41 + # @Description: + +## 在浏览器输入 https://内网IP:6443 ,按下图所示步骤登录 phpLDAPadmin +## Login DN: cn=admin,dc=example,dc=org 默认管理员用户 +## Password: admin 管理员密码 +### +docker run -p 6443:443 \ + --name ldapadmin \ + --link ldap-service:ldap \ + --env PHPLDAPADMIN_LDAP_HOSTS=ldap \ + --detach osixia/phpldapadmin:0.9.0 \ No newline at end of file diff --git a/2.docker/poste-mail.sh b/2.docker/poste-mail.sh new file mode 100644 index 0000000..789df0b --- /dev/null +++ b/2.docker/poste-mail.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +docker stop mail +docker rm mail + +docker run --name "mail" \ +-p 25:25 -p 8888:80 \ +-e "HTTPS=OFF" \ +-e "DISABLE_CLAMAV=TRUE" \ +-p 110:110 -p 143:143 -p 465:465 \ +-p 587:587 -p 993:993 -p 995:995 \ +-v /etc/localtime:/etc/localtime:ro -v \ +/home/data:/data \ +-h "mail.xxx.com" \ +--restart=always -d -t analogic/poste.io + + +iptables -D INPUT -p tcp -m multiport --dport 25,110,143,465,993,587,995,1022,55557 -j ACCEPT +iptables -I INPUT -p 
tcp -m multiport --dport 25,110,143,465,993,587,995,1022,55557 -j ACCEPT +iptables-save \ No newline at end of file diff --git a/2.docker/python.Dockerfile-alpine b/2.docker/python.Dockerfile-alpine new file mode 100644 index 0000000..232aaa2 --- /dev/null +++ b/2.docker/python.Dockerfile-alpine @@ -0,0 +1,41 @@ +FROM python:3.9-alpine + +MAINTAINER Logan +WORKDIR /opt/app +COPY requirements.txt /opt/app + +## 国内加速源 +RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories +## 工具包 +RUN apk --no-cache add curl vim busybox-extras +## ldap依赖 +RUN apk --no-cache add gcc \ +libldap \ +libc-dev \ +openldap-dev +# python3-dev +## Pillow图片库依赖 +RUN apk --no-cache add libjpeg jpeg-dev musl-dev zlib-dev libffi-dev +# python3-dev +RUN pip --no-cache-dir install -i https://mirrors.aliyun.com/pypi/simple -r requirements.txt +COPY . /opt/app + +## 清理数据 +RUN apk del \ + gcc \ + libc-dev \ + openldap-dev +# RUN apk del libjpeg jpeg-dev musl-dev zlib-dev libffi-dev +RUN rm -rf /tmp/* /opt/app/env /opt/app/db.sqlite3 /opt/app/logs/* /root/.cache/pip /var/cache/apk/* + +EXPOSE 8000 +CMD ["/bin/sh","run.sh"] + +## 构建镜像 +# docker build -t lghost/python:3.9 . 
-f Dockerfile-alpine + +## 简洁启动 +# docker run -d -p8000:8000 --name cmdb lghost/python:3.9 + +## 初始化数据 +# docker exec -it cmdb python init.py diff --git a/2.docker/python.Dockerfile-centos b/2.docker/python.Dockerfile-centos new file mode 100644 index 0000000..390baa8 --- /dev/null +++ b/2.docker/python.Dockerfile-centos @@ -0,0 +1,19 @@ +FROM centos:7 +RUN rm /etc/yum.repos.d/* \ + && curl -s -o ./Centos-7.repo https://mirrors.aliyun.com/repo/Centos-7.repo \ + && curl -s -o ./epel.repo https://mirrors.aliyun.com/repo/epel-7.repo \ + && sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' Centos-7.repo \ + && yum install -y wget openssl-devel bzip2-devel expat-devel gdbm-devel readline-devel sqlite-devel python-devel \ +libffi-devel tk-devel + + +RUN wget -c https://www.python.org/ftp/python/3.9.16/Python-3.9.16.tar.xz \ + && tar xf Python*.xz \ + && cd Python-3.9.16 \ + && ./configure prefix=/usr/local/python3 \ + && make -j 2 && make altinstall && ln -s /usr/local/python3/bin/python3.9 /usr/bin/python3 + + + + +# docker build -t lghost/python:3.9 . 
-f Dockerfile-alpine \ No newline at end of file diff --git a/2.docker/python.Dockerfile-slim b/2.docker/python.Dockerfile-slim new file mode 100644 index 0000000..5c98372 --- /dev/null +++ b/2.docker/python.Dockerfile-slim @@ -0,0 +1,13 @@ +FROM python:3.9.13-slim +ENV PYTHONUNBUFFERED 1 + + +# RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list # 国内源 + +WORKDIR /opt/app + +RUN apt-get update \ + && apt-get install -y net-tools apt-utils \ + libtiff5-dev libjpeg8-dev zlib1g-dev +COPY requirements.txt /opt/app + diff --git a/2.docker/python.Dockerfile-ubuntu b/2.docker/python.Dockerfile-ubuntu new file mode 100644 index 0000000..5afb4a8 --- /dev/null +++ b/2.docker/python.Dockerfile-ubuntu @@ -0,0 +1,19 @@ +FROM ubuntu:22.04 + +MAINTAINER Logan +WORKDIR /opt + +## 依赖包 +RUN sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \ + && apt update \ + && apt install nload iftop net-tools curl git -y \ + && apt install python3 python3-pip -y + +## 清理数据 +RUN rm -rf /var/lib/apt/lists/* \ +&& apt clean \ +&& apt autoclean + +EXPOSE 8000 + +# docker build -t lghost/python3 . 
-f Dockerfile-ubuntu-python3 \ No newline at end of file diff --git a/2.docker/redis.sh b/2.docker/redis.sh new file mode 100644 index 0000000..bd2e916 --- /dev/null +++ b/2.docker/redis.sh @@ -0,0 +1,19 @@ +#!/bin/bash +### + # @Author: admin@attacker.club + # @Date: 2022-09-14 21:38:54 + # @LastEditTime: 2022-09-29 16:44:42 + # @Description: +### + +docker run -d \ +--name redis \ +--restart always \ +-p 6379:6379 \ +-v /home/docker-data/redis/data:/data \ +redis:latest --appendonly yes --requirepass opsbase.cn + + + +# appendonly 启动后数据持久化 +# requirepass 指定密码 \ No newline at end of file diff --git a/2.docker/sshd-centos7-dockerfile b/2.docker/sshd-centos7-dockerfile new file mode 100644 index 0000000..692f818 --- /dev/null +++ b/2.docker/sshd-centos7-dockerfile @@ -0,0 +1,17 @@ +FROM centos:7 +RUN cd /etc/yum.repos.d && mkdir bak && mv *.repo bak/ \ + && curl -s -o ./Centos-7.repo https://mirrors.aliyun.com/repo/Centos-7.repo \ + && curl -s -o ./epel.repo https://mirrors.aliyun.com/repo/epel-7.repo \ + && sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' Centos-7.repo \ + && yum clean all && yum makecache \ + && yum install -y wget vim openssh-server net-tools initscripts \ + && /usr/bin/ssh-keygen -A && mkdir /var/run/sshd && echo 'UseDNS no' >> /etc/ssh/sshd_config && sed -i -e '/pam_loginuid.so/d' /etc/pam.d/sshd \ + && yum clean all && rm -rf /var/tmp/* && rm -rf /var/cache/yum/* \ + && echo 'root:opsbase' |chpasswd + +EXPOSE 22 +# CMD ["/usr/sbin/sshd","-D"] +ENTRYPOINT ["/usr/sbin/sshd","-D"] + +# ## 构建镜像 +# docker build -t lghost/centos7-ssh:latest . 
-f sshd-centos7-dockerfile \ No newline at end of file diff --git a/2.docker/sshd.dockerfile b/2.docker/sshd.dockerfile new file mode 100644 index 0000000..40bce95 --- /dev/null +++ b/2.docker/sshd.dockerfile @@ -0,0 +1,36 @@ +FROM alpine + +MAINTAINER admin@attacker.club.com + +# 替换阿里云的源 +RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories + +# 更新源、安装openssh 并修改配置文件和生成key +RUN apk update && \ + apk add --no-cache openssh tzdata && rm -rf /var/cache/apk/* && \ + ssh-keygen -t dsa -P "" -f /etc/ssh/ssh_host_dsa_key && \ + ssh-keygen -t rsa -P "" -f /etc/ssh/ssh_host_rsa_key && \ + ssh-keygen -t ecdsa -P "" -f /etc/ssh/ssh_host_ecdsa_key && \ + ssh-keygen -t ed25519 -P "" -f /etc/ssh/ssh_host_ed25519_key && \ + ssh-keygen -A && \ + sed -i "/PermitRootLogin/c PermitRootLogin yes" /etc/ssh/sshd_config && \ + sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/g' /etc/ssh/sshd_config + +# 自定义配置 +RUN echo "root:123456" | chpasswd && \ + echo > /etc/motd && \ + echo '''PS1="\[\e[37;1m\][\[\e[32;1m\]\u\[\e[37;40m\]@\[\e[34;1m\]\h \[\e[0m\]\t \[\e[35;1m\]\W\[\e[37;1m\]]\[\e[m\]/\\$" ''' >>/etc/profile + +# # 安全配置,禁止网络工具上传和下载 +RUN rm /usr/bin/wget /usr/bin/nc /usr/bin/scp /bin/ping /usr/bin/traceroute* /sbin/apk -f + + +# 开放22端口 +EXPOSE 22 +# 执行ssh启动命令 +CMD ["/usr/sbin/sshd", "-D"] + +## Build +# docker build -t lghost/sshd . 
-f sshd.dockerfile +## Run +# docker run --restart=unless-stopped -p 2222:22 --name sshd lghost/sshd \ No newline at end of file diff --git a/2.docker/sshd.sh b/2.docker/sshd.sh new file mode 100644 index 0000000..32a3b9f --- /dev/null +++ b/2.docker/sshd.sh @@ -0,0 +1,9 @@ +#!/bin/bash +### + # @author: 以谁为师 + # @site: opsbase.cn + # @Date: 2022-04-02 16:04:05 + # @LastEditTime: 2022-10-14 12:23:01 + # @Description: +### + diff --git a/2.docker/yearning.sh b/2.docker/yearning.sh new file mode 100644 index 0000000..b55dcb0 --- /dev/null +++ b/2.docker/yearning.sh @@ -0,0 +1,15 @@ + +## 安装指南 +https://guide.yearning.io + +## 构建镜像 +docker build -t yearning:lts . -f Dockerfile + + +## 启动 +docker run -d --name yearning \ + -p11110:8000 \ + -e MYSQL_DB=Yearning \ + -e MYSQL_USER=admin -e MYSQL_ADDR=172.16.100.138:3306 -e MYSQL_PASSWORD=xxxxxx \ + yearning:lts + diff --git a/2.docker/zabbix.sh b/2.docker/zabbix.sh new file mode 100644 index 0000000..d0be65c --- /dev/null +++ b/2.docker/zabbix.sh @@ -0,0 +1,87 @@ +#!/bin/bash +### + # @Author: admin@attacker.club + # @Date: 2022-09-29 14:35:55 + # @LastEditTime: 2023-03-06 01:14:56 + # @Description: +### + + + docker stop zabbix-mysql >2&1 + docker stop zabbix-web >2&1 + docker stop zabbix-server >2&1 + docker rm zabbix-mysql >2&1 + docker rm zabbix-web >2&1 + docker rm zabbix-server >2&1 + + ## DB服务 + dataDir="/opt/docker-data/mysql" + rm ="/opt/docker-data/mysql" + yum remove mariadb* -y # 卸载默认mariadb + rpm -ivh https://repo.mysql.com/mysql80-community-release-el7.rpm + yum install mysql-community-client -y # 安装mysql client + + + if [ ! 
"$rootPassword" ]; then + rootPassword=`cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 12` + zbxPassword=`cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 12` + echo "rootPassword=$rootPassword" >> ~/.bashrc; + echo "zbxPassword=$zbxPassword" >> ~/.bashrc; + fi + + + + echo "> 启动mysql" + docker run \ + --restart always \ + -d -p 3306:3306 \ + --name zabbix-mysql \ + --hostname zabbix-mysql \ + -e MYSQL_ROOT_PASSWORD=${rootPassword} \ + -e MYSQL_USER="zabbix" \ + -e MYSQL_PASSWORD=${zbxPassword} \ + -e MYSQL_DATABASE="zabbix" \ + -v $dataDir:/var/lib/mysql \ + mysql:8 --character-set-server=utf8 --collation-server=utf8_bin + + sleep 10 + ## 启动zabbix server + echo "> 启动zabbix server" + docker run -d -p 10051:10051 \ + --restart always \ + --name zabbix-server \ + --hostname zabbix-server \ + --link zabbix-mysql:mysql \ + -e DB_SERVER_HOST="mysql" \ + -e MYSQL_USER="zabbix" \ + -e MYSQL_PASSWORD="${zbxPassword}" \ + -v /etc/localtime:/etc/localtime:ro \ + -v /data/docker/zabbix/alertscripts:/usr/lib/zabbix/alertscripts \ + -v /data/docker/zabbix/externalscripts:/usr/lib/zabbix/externalscripts \ + zabbix/zabbix-server-mysql:ubuntu-6.0-latest + # zabbix/zabbix-server-mysql:latest + + + ## 启动zabbix web + echo "> 启动zabbix web" + docker run -d -p 81:8080 \ + --restart always \ + --name zabbix-web \ + --hostname zabbix-web \ + --link zabbix-mysql:mysql \ + --link zabbix-server:zabbix-server \ + -e DB_SERVER_HOST="mysql" \ + -e MYSQL_USER="zabbix" \ + -e MYSQL_PASSWORD="${zbxPassword}" \ + -e ZBX_SERVER_HOST="zabbix-server" \ + -e PHP_TZ="Asia/Shanghai" \ + zabbix/zabbix-web-nginx-mysql:6.0-alpine-latest + # zabbix/zabbix-web-nginx-mysql:latest + + sleep 3 + echo "mysql -h127.0.0.1 -uroot -p$rootPassword" > mysql.txt + echo "mysql -h127.0.0.1 -uzabbix -p$zbxPassword" >> mysql.txt + echo "http://zabbix 账号: Admin / zabbix" + ## sql添加远程账号 + # CREATE USER 'admin'@'%' ; + # GRANT ALL ON *.* TO 'admin'@'%' IDENTIFIED WITH mysql_native_password BY 'adminPwd123'; \ No newline at end of 
file diff --git a/3.kubernetes/Manifests/busybox.yml b/3.kubernetes/Manifests/busybox.yml new file mode 100644 index 0000000..d5292f3 --- /dev/null +++ b/3.kubernetes/Manifests/busybox.yml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox + namespace: kxyyq4 +spec: + containers: + - image: busybox + command: + - sleep + - "3600" + imagePullPolicy: IfNotPresent + name: busybox +restartPolicy: Always \ No newline at end of file diff --git a/3.kubernetes/Manifests/cmdb.yml b/3.kubernetes/Manifests/cmdb.yml new file mode 100644 index 0000000..e69de29 diff --git a/3.kubernetes/Manifests/halloworld.yml b/3.kubernetes/Manifests/halloworld.yml new file mode 100644 index 0000000..a07a34a --- /dev/null +++ b/3.kubernetes/Manifests/halloworld.yml @@ -0,0 +1,20 @@ + apiVersion: apps/v1 + kind: Deployment + metadata: + name: hello-world + spec: + selector: + matchLabels: + run: load-balancer-example + replicas: 2 + template: + metadata: + labels: + run: load-balancer-example + spec: + containers: + - name: hello-world + image: registry.cn-hangzhou.aliyuncs.com/aliyun_google/google-sample-node-hello:1.0 + ports: + - containerPort: 8080 + protocol: TCP \ No newline at end of file diff --git a/3.kubernetes/Manifests/nginx.yml b/3.kubernetes/Manifests/nginx.yml new file mode 100644 index 0000000..c727d1c --- /dev/null +++ b/3.kubernetes/Manifests/nginx.yml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: # metadata是该资源的元数据,name是必须的元数据项 + name: nginx-deployment + namespace: test # 指定命名空间 + labels: + app: nginx +spec: # spec部分是该Deployment的规则说明 + replicas: 2 + selector: + matchLabels: + app: nginx + template: # template定义Pod的模板,这是配置的重要部分 + metadata: # 定义Pod的元数据,至少要顶一个label,label的key和value可以任意指定 + labels: + app: nginx + spec: # spec描述的是Pod的规则,此部分定义pod中每一个容器的属性,name和image是必需的 + containers: + - name: nginx + image: nginx:1.23-debian-11 # 提供本地nginx镜像 + ports: + - containerPort: 80 diff --git a/3.kubernetes/configmap/cmdb-configmap.yml 
b/3.kubernetes/configmap/cmdb-configmap.yml new file mode 100644 index 0000000..0bd5b1f --- /dev/null +++ b/3.kubernetes/configmap/cmdb-configmap.yml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: mysql-cmdb + namespace: test +data: + DB_HOST: "66.94.125.73" + DB_PORT: "63306" + DB_NAME: "cmdb" diff --git a/3.kubernetes/configmap/configmap.yml b/3.kubernetes/configmap/configmap.yml new file mode 100644 index 0000000..993ac42 --- /dev/null +++ b/3.kubernetes/configmap/configmap.yml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: myblog + namespace: test +data: + MYSQL_HOST: "66.94.125.73" + MYSQL_PORT: "63306" \ No newline at end of file diff --git a/3.kubernetes/deployment/deploy-cmdb.yml b/3.kubernetes/deployment/deploy-cmdb.yml new file mode 100644 index 0000000..0d1d24d --- /dev/null +++ b/3.kubernetes/deployment/deploy-cmdb.yml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cmdb + namespace: test +spec: + replicas: 1 + selector: + matchLabels: + app: cmdb + template: + metadata: + labels: + app: cmdb + spec: + containers: + - name: cmdb + # image: docker.io/lghost/cmdb:latest + image: harbor.opsbase.cn/public/test.demo.python.cmdb:b6c565d + imagePullPolicy: Always + env: + - name: DB_HOST + valueFrom: + configMapKeyRef: + name: mysql-cmdb + key: DB_HOST + - name: DB_PORT + valueFrom: + configMapKeyRef: + name: mysql-cmdb + key: DB_PORT + - name: DB_USER + valueFrom: + secretKeyRef: + name: mysql-cmdb + key: DB_USER + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: mysql-cmdb + key: DB_PASSWORD + - name: REDIS_HOST + valueFrom: + configMapKeyRef: + name: redis-cmdb + key: REDIS_HOST + - name: REDIS_PORT + valueFrom: + configMapKeyRef: + name: redis-cmdb + key: REDIS_PORT + - name: REDIS_PWD + valueFrom: + secretKeyRef: + name: redis-cmdb + key: REDIS_PWD + + ports: + - containerPort: 8000 + resources: + requests: + memory: 1500Mi + cpu: 100m + limits: + memory: 35000Mi + cpu: 500m + 
livenessProbe: + httpGet: + path: /prometheus/metrics + port: 8000 + scheme: HTTP + initialDelaySeconds: 15 # 容器启动后第一次执行探测是需要等待多少秒 + periodSeconds: 120 # 执行探测的频率 + timeoutSeconds: 5 # 探测超时时间 + readinessProbe: + httpGet: + path: /prometheus/metrics + port: 8000 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 3 + periodSeconds: 15 diff --git a/3.kubernetes/deployment/deploy-myblog.yml b/3.kubernetes/deployment/deploy-myblog.yml new file mode 100644 index 0000000..2278d16 --- /dev/null +++ b/3.kubernetes/deployment/deploy-myblog.yml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: myblog + namespace: test +spec: + replicas: 2 # 指定Pod副本数 + selector: # 指定Pod的选择器 + matchLabels: + app: myblog + template: + metadata: + labels: # 给Pod打label + app: myblog + spec: + containers: + - name: myblog + image: harbor.opsbase.cn/public/myblog + imagePullPolicy: IfNotPresent + env: + - name: MYSQL_HOST + valueFrom: + configMapKeyRef: + name: test-db-configmap + key: Mysql_Host + - name: MYSQL_PORT + valueFrom: + configMapKeyRef: + name: test-db-configmap + key: Mysql_PORT + - name: MYSQL_USER + valueFrom: + secretKeyRef: + name: test-db-secret + key: MYSQL_USER + - name: MYSQL_PASSWD + valueFrom: + secretKeyRef: + name: test-db-secret + key: MYSQL_PASSWD + ports: + - containerPort: 80 + resources: + requests: + memory: 100Mi + cpu: 50m + limits: + memory: 500Mi + cpu: 100m + livenessProbe: + httpGet: + path: /blog/index/ + port: 80 + scheme: HTTP + initialDelaySeconds: 10 # 容器启动后第一次执行探测是需要等待多少秒 + periodSeconds: 15 # 执行探测的频率 + timeoutSeconds: 2 # 探测超时时间 + readinessProbe: + httpGet: + path: /blog/index/ + port: 80 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 2 + periodSeconds: 15 \ No newline at end of file diff --git a/3.kubernetes/deployment/sshd-pod-svc.yml b/3.kubernetes/deployment/sshd-pod-svc.yml new file mode 100644 index 0000000..d11d50d --- /dev/null +++ b/3.kubernetes/deployment/sshd-pod-svc.yml @@ -0,0 +1,13 @@ +apiVersion: 
v1 +kind: Service +metadata: + name: dev-opsbase-ssh-svc + namespace: test +spec: + ports: + - port: 22 + protocol: TCP + targetPort: 22 + selector: + app: dev-opsbase-ssh + type: ClusterIP diff --git a/3.kubernetes/deployment/sshd-pod.yml b/3.kubernetes/deployment/sshd-pod.yml new file mode 100644 index 0000000..988f616 --- /dev/null +++ b/3.kubernetes/deployment/sshd-pod.yml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 # Api接口版本 +kind: Deployment # 定义控制器 +metadata: + name: dev-opsbase-ssh # 定义deployment名称 + namespace: test +spec: + replicas: 1 # 副本数量,还需要指定副本标签与 Deployment控制器进行匹配 + selector: # 指定Pod选择器 + matchLabels: # 标签匹配方式 + app: dev-opsbase-ssh # 匹配metadata.name名称 + template: # pod容器 + metadata: # 具体信息 + labels: # 定义标签 + app: dev-opsbase-ssh # pod名称 + spec: + containers: + - name: dev-opsbase-ssh # 容器名称 + image: lghost/sshd:v0.1 # 拉取镜像 + imagePullPolicy: IfNotPresent # 镜像pull策略 + ports: + - containerPort: 22 + resources: # 限制资源 + requests: + memory: 100Mi + cpu: 50m + limits: + memory: 500Mi + cpu: 100m diff --git a/3.kubernetes/endpoint/mysql.yml b/3.kubernetes/endpoint/mysql.yml new file mode 100644 index 0000000..63a1cb8 --- /dev/null +++ b/3.kubernetes/endpoint/mysql.yml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: mysql-bos + namespace: base +spec: + ports: + - port: 3306 + targetPort: 3306 + protocol: TCP + name: mysql-bos +--- +kind: Endpoints +apiVersion: v1 +metadata: + name: mysql-bos + namespace: base +subsets: + - addresses: + - ip: 10.10.10.60 + ports: + - name: mysql-bos + port: 3306 + protocol: TCP \ No newline at end of file diff --git a/3.kubernetes/endpoint/readme.md b/3.kubernetes/endpoint/readme.md new file mode 100644 index 0000000..27a042a --- /dev/null +++ b/3.kubernetes/endpoint/readme.md @@ -0,0 +1,4 @@ +# endpoint + +使用endpoint 将容器外地址改为数据库连接: +mysql-bos.base:3306 \ No newline at end of file diff --git a/3.kubernetes/hpa-cronhpa/demo_deployment_cronhpa.yaml b/3.kubernetes/hpa-cronhpa/demo_deployment_cronhpa.yaml new file 
mode 100644 index 0000000..2ad6380 --- /dev/null +++ b/3.kubernetes/hpa-cronhpa/demo_deployment_cronhpa.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: apps/v1 # for versions before 1.8.0 use apps/v1beta1 +kind: Deployment +metadata: + name: nginx-deployment-basic + labels: + app: nginx +spec: + replicas: 2 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 # replace it with your exactly + ports: + - containerPort: 80 +--- +apiVersion: autoscaling.alibabacloud.com/v1beta1 +kind: CronHorizontalPodAutoscaler +metadata: + labels: + controller-tools.k8s.io: "1.0" + name: cronhpa-nginx-deployment-basic +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: nginx-deployment-basic + jobs: + - name: "scale-down" + schedule: "* * 23 * * *" + targetSize: 1 + - name: "scale-up" + schedule: "* * 17 * * *" + targetSize: 2 diff --git a/3.kubernetes/hpa/components.yaml b/3.kubernetes/hpa/components.yaml new file mode 100644 index 0000000..d8d6890 --- /dev/null +++ b/3.kubernetes/hpa/components.yaml @@ -0,0 +1,186 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader +rules: +- apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +rules: +- apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + - namespaces + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + image: k8s.gcr.io/metrics-server/metrics-server:v0.4.4 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + 
- containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + periodSeconds: 10 + securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: tmp-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + volumes: + - emptyDir: {} + name: tmp-dir +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + labels: + k8s-app: metrics-server + name: v1beta1.metrics.k8s.io +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 diff --git a/3.kubernetes/ingress/demo-多路径转发及重写的实现/bookstore.details.ingress.yaml b/3.kubernetes/ingress/demo-多路径转发及重写的实现/bookstore.details.ingress.yaml new file mode 100644 index 0000000..d6dbe5e --- /dev/null +++ b/3.kubernetes/ingress/demo-多路径转发及重写的实现/bookstore.details.ingress.yaml @@ -0,0 +1,17 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bookstore-details + namespace: default +spec: + rules: + - host: bookstore.luffy.com + http: + paths: + - path: /details + pathType: Prefix + backend: + service: + name: details + port: + number: 9080 \ No newline at end of file diff --git a/3.kubernetes/ingress/demo-多路径转发及重写的实现/bookstore.ingress.yml b/3.kubernetes/ingress/demo-多路径转发及重写的实现/bookstore.ingress.yml new file mode 100644 index 0000000..e83558f --- /dev/null +++ b/3.kubernetes/ingress/demo-多路径转发及重写的实现/bookstore.ingress.yml @@ -0,0 +1,24 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bookstore + namespace: default +spec: + rules: + - host: bookstore.luffy.com + http: + paths: + - path: /reviews + pathType: Prefix + backend: + service: + name: reviews + port: + number: 9080 + - path: /details + pathType: Prefix + backend: + 
service: + name: details + port: + number: 9080 \ No newline at end of file diff --git a/3.kubernetes/ingress/demo-多路径转发及重写的实现/bookstore.reviews.ingress.yaml b/3.kubernetes/ingress/demo-多路径转发及重写的实现/bookstore.reviews.ingress.yaml new file mode 100644 index 0000000..9a79ad4 --- /dev/null +++ b/3.kubernetes/ingress/demo-多路径转发及重写的实现/bookstore.reviews.ingress.yaml @@ -0,0 +1,19 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: bookstore-reviews + namespace: default + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /reviews/$1 +spec: + rules: + - host: bookstore.luffy.com + http: + paths: + - path: /api/reviews/(.*) + pathType: Prefix + backend: + service: + name: reviews + port: + number: 9080 \ No newline at end of file diff --git a/3.kubernetes/ingress/demo-多路径转发及重写的实现/detail.dpl.yml b/3.kubernetes/ingress/demo-多路径转发及重写的实现/detail.dpl.yml new file mode 100644 index 0000000..276cf59 --- /dev/null +++ b/3.kubernetes/ingress/demo-多路径转发及重写的实现/detail.dpl.yml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: details + labels: + app: details +spec: + replicas: 1 + selector: + matchLabels: + app: details + template: + metadata: + labels: + app: details + spec: + containers: + - name: details + image: docker.io/istio/examples-bookinfo-details-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 \ No newline at end of file diff --git a/3.kubernetes/ingress/demo-多路径转发及重写的实现/detail.svc.yml b/3.kubernetes/ingress/demo-多路径转发及重写的实现/detail.svc.yml new file mode 100644 index 0000000..fe649d5 --- /dev/null +++ b/3.kubernetes/ingress/demo-多路径转发及重写的实现/detail.svc.yml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: details + labels: + app: details +spec: + ports: + - port: 9080 + name: http + selector: + app: details + \ No newline at end of file diff --git a/3.kubernetes/ingress/demo-多路径转发及重写的实现/readme.md b/3.kubernetes/ingress/demo-多路径转发及重写的实现/readme.md new file mode 100644 index 
0000000..81a26a5 --- /dev/null +++ b/3.kubernetes/ingress/demo-多路径转发及重写的实现/readme.md @@ -0,0 +1,28 @@ + +## 多path转发示例 + +### 目标 + +myblog.pod.opsbase.cn → 172.21.51.143 ↓ + /foo/aaa service1:4200/foo/aaa + /bar service2:8080 + / myblog:80/ +### 命令 + +```bash +kubectl apply -f detail.dpl.yml +kubectl apply -f detail.svc.yml + +kubectl apply -f reviews.dpl.yml +kubectl apply -f reviews.svc.yml + +``` + + +## URL重写 + +目标: + +bookstore.luffy.com → 172.21.51.67 ↓ + /api/reviews -> reviews service + /details -> details service diff --git a/3.kubernetes/ingress/demo-多路径转发及重写的实现/reviews.dpl.yml b/3.kubernetes/ingress/demo-多路径转发及重写的实现/reviews.dpl.yml new file mode 100644 index 0000000..e585123 --- /dev/null +++ b/3.kubernetes/ingress/demo-多路径转发及重写的实现/reviews.dpl.yml @@ -0,0 +1,25 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews + labels: + app: reviews +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + template: + metadata: + labels: + app: reviews + spec: + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v3:1.16.2 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 \ No newline at end of file diff --git a/3.kubernetes/ingress/demo-多路径转发及重写的实现/reviews.svc.yml b/3.kubernetes/ingress/demo-多路径转发及重写的实现/reviews.svc.yml new file mode 100644 index 0000000..491adb3 --- /dev/null +++ b/3.kubernetes/ingress/demo-多路径转发及重写的实现/reviews.svc.yml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: reviews + labels: + app: reviews +spec: + ports: + - port: 9080 + name: http + selector: + app: reviews \ No newline at end of file diff --git a/3.kubernetes/ingress/https_myblog.yml b/3.kubernetes/ingress/https_myblog.yml new file mode 100644 index 0000000..74ae222 --- /dev/null +++ b/3.kubernetes/ingress/https_myblog.yml @@ -0,0 +1,26 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: myblog + namespace: test + annotations: # 添加nginx参数 + 
nginx.ingress.kubernetes.io/force-ssl-redirect: "false" # 同时支持http/https + nginx.ingress.kubernetes.io/proxy-body-size: 1000m + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.org/client-max-body-size: 1000m +spec: + rules: + - host: myblog.opsbase.cn + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: myblog + port: + number: 80 + tls: + - hosts: + - myblog.opsbase.cn + secretName: tls-myblog \ No newline at end of file diff --git a/3.kubernetes/ingress/ingress_cmdb.yml b/3.kubernetes/ingress/ingress_cmdb.yml new file mode 100644 index 0000000..75ce870 --- /dev/null +++ b/3.kubernetes/ingress/ingress_cmdb.yml @@ -0,0 +1,17 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cmdb-ing + namespace: test +spec: + rules: + - host: cmdb.pod.opsbase.cn + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: cmdb-svc + port: + number: 80 \ No newline at end of file diff --git a/3.kubernetes/ingress/ingress_myblog.yml b/3.kubernetes/ingress/ingress_myblog.yml new file mode 100644 index 0000000..e16521b --- /dev/null +++ b/3.kubernetes/ingress/ingress_myblog.yml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: myblog + namespace: test +spec: + rules: + - host: myblog.pod.opsbase.cn + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: myblog + port: + number: 80 + tls: + - hosts: + - myblog.pod.opsbase.cn + secretName: tls-pod.opsbase.cn + + \ No newline at end of file diff --git a/3.kubernetes/ingress/mandatory.yml b/3.kubernetes/ingress/mandatory.yml new file mode 100644 index 0000000..598b009 --- /dev/null +++ b/3.kubernetes/ingress/mandatory.yml @@ -0,0 +1,296 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-configuration + namespace: ingress-nginx + labels: 
+ app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: tcp-services + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: udp-services + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nginx-ingress-serviceaccount + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: nginx-ingress-clusterrole + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses/status + verbs: + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: nginx-ingress-role + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + # Defaults to "-" + # Here: "-" + # This has to be adapted if you change either parameter + # when launching the 
nginx-ingress-controller. + - "ingress-controller-leader-nginx" + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: nginx-ingress-role-nisa-binding + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-ingress-role +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount + namespace: ingress-nginx + +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: nginx-ingress-clusterrole-nisa-binding + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-ingress-clusterrole +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount + namespace: ingress-nginx + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-ingress-controller + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + annotations: + prometheus.io/port: "10254" + prometheus.io/scrape: "true" + spec: + hostNetwork: true #添加为host模式 + # wait up to five minutes for the drain of connections + terminationGracePeriodSeconds: 300 + serviceAccountName: nginx-ingress-serviceaccount + #serviceAccount: kube-dns + #serviceAccountName: kube-dns + nodeSelector: + ingress: "true" + containers: + - name: nginx-ingress-controller + image: 
quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0 + args: + - /nginx-ingress-controller + - --configmap=$(POD_NAMESPACE)/nginx-configuration + - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services + - --udp-services-configmap=$(POD_NAMESPACE)/udp-services + - --publish-service=$(POD_NAMESPACE)/ingress-nginx + - --annotations-prefix=nginx.ingress.kubernetes.io + securityContext: + allowPrivilegeEscalation: true + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + # www-data -> 101 + runAsUser: 101 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + +--- + +apiVersion: v1 +kind: LimitRange +metadata: + name: ingress-nginx + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +spec: + limits: + - min: + memory: 90Mi + cpu: 100m + type: Container diff --git a/3.kubernetes/ingress/tls.crt b/3.kubernetes/ingress/tls.crt new file mode 100644 index 0000000..e944803 --- /dev/null +++ b/3.kubernetes/ingress/tls.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDMTCCAhmgAwIBAgIJAOEPd8tPOqd3MA0GCSqGSIb3DQEBCwUAMC8xFTATBgNV +BAMMDCoub3BzYmFzZS5jbjEWMBQGA1UECgwNaW5ncmVzcy1uZ2lueDAeFw0yMjAz +MjUxODE5MzlaFw0zMDAzMjMxODE5MzlaMC8xFTATBgNVBAMMDCoub3BzYmFzZS5j +bjEWMBQGA1UECgwNaW5ncmVzcy1uZ2lueDCCASIwDQYJKoZIhvcNAQEBBQADggEP 
+ADCCAQoCggEBAKDY390pDMhPaO8U3Lxt4BUnA20A7nIcCTBCu7pHdOAXUua2Sfje +OMaKjx+SYA1T2I6fvS830vwiPLGZTIPK9YXS9tL/Zd8NWvxqfRJhKVbGjgTxi0Rv +ARhAmbnFwed+DQIs+oDZgrhrpIYZIEmD82RWEFPtYTcPs/Tf5Vn8QVE6dOUP8NpF +Ck84gp2W1qjnSMF84K6ESBYOvFi76feOGZ5OwGwS9U2DeLDp5ceKwBzwXiqRugMR +HyACgrBFFK+BSkq8duKdKej4SEwVO2gI4HHFXBwpLtpo2iAv5/dA/+nxo0xWeflj +FwTgxxzwtmfKilR9YZxpss2QWibnBFmn7MMCAwEAAaNQME4wHQYDVR0OBBYEFBMO +VbR8Y4g9wqgZQ6I4TVmEn7+hMB8GA1UdIwQYMBaAFBMOVbR8Y4g9wqgZQ6I4TVmE +n7+hMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJWTdX6WTsdi5ZnD +3eFqcfAaJAqyALgJaDh+DEL+Ke9pgRlJDF9//iO95JfDfuLu17Vv0pngQZfabP/f +L9ui308uEiEpwbpqFus+Q2k6uzYMzVk1RDT81fgPtMk1g13dCnzibhn3U0fS0k1o +AYsEqKk4RG0wvNWS+/XYGEAjE6mgw79lkeQpChWYRyA6H/nZBc+tsTx+lEf8Edsl +VW3iDpOl00rRoOtvBWqEGVV+RD5EcNcCFwCb0wPFKs2N24T7E0PqXrRzaX3beeo9 +BdBfNjc0GOC4kmxkMtnHRnVxtKOzeNwa8u/aToo3dko8a2TtgdioBT3u5jPLY7Pz +AawEAaI= +-----END CERTIFICATE----- diff --git a/3.kubernetes/ingress/tls.key b/3.kubernetes/ingress/tls.key new file mode 100644 index 0000000..1bffc53 --- /dev/null +++ b/3.kubernetes/ingress/tls.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCg2N/dKQzIT2jv +FNy8beAVJwNtAO5yHAkwQru6R3TgF1Lmtkn43jjGio8fkmANU9iOn70vN9L8Ijyx +mUyDyvWF0vbS/2XfDVr8an0SYSlWxo4E8YtEbwEYQJm5xcHnfg0CLPqA2YK4a6SG +GSBJg/NkVhBT7WE3D7P03+VZ/EFROnTlD/DaRQpPOIKdltao50jBfOCuhEgWDrxY +u+n3jhmeTsBsEvVNg3iw6eXHisAc8F4qkboDER8gAoKwRRSvgUpKvHbinSno+EhM +FTtoCOBxxVwcKS7aaNogL+f3QP/p8aNMVnn5YxcE4Mcc8LZnyopUfWGcabLNkFom +5wRZp+zDAgMBAAECggEAOscl/365whVqxjt8cotAYNpx4Qp/GEnwfadVTLxCFIXR +cKfankwuuTb3GFV4Lkaek3gCPVgMDMFCJrBbiqnHUREHy5EzG/CYeDc931KMNb63 +NU1DVE2wO7mXs5B1zG9+t3XSUPWrVFNZuvtBljvW3KqqqtDLPsDJRUnwWRLal6DN +o2RM0wUmAEdnsXicCLN76QDSyDpgDbHzZe2lI3zgKIm1p15DUHh3HZLTlY4ER1IS +nq4TLXX9raHHVUY9pvwiiOhoRUsNHL/WScHpC5Co7K1lRWpok7egZtrtsbz6on/C +kSThCWnrTUcHeUdX5M0DHb7A5X3OdaRkNJlD7ya3CQKBgQDSCwgRjT4uDlS+Nr3V +v89IIxpk/tqNNrwEtrXr2VsVJeW5dpReo/2Tn4bD8zGE2+V8tLq10jCoVzbhjPZI 
+rdgDdi1F6Yk/CWDdFP00w0h0FDbJSXOcSoFuQcE5tLKqcr4pFEnwGKwHmQ+CrsO9 +hrWpjAQk2IF+R39ZF1fP3YRZTQKBgQDECkXIaVff/X0jhGaMBNEEtR+1ePE/5qUy +EgP+NnbKu4p+W8g1ridk90jNcYTQWJDqXgF/W+Oa7d878OP6i69PhTO1rJTGDPUv +h7COCnbajwYigwM9263IbwujQORWLTyuJiO2E0QAteFxaHK380wZrLF6YFE32p77 +bXXogtLWTwKBgCdhGBcqUvERkMPEwZyUhIkyw50RxunYzDFaDAt2ycPSQhqeZ7zC +pCUMMJkGPE97ZrAVtjeme6bkCw8IfZgst6YWfvBvk2K1IGryp3I/9pKEw6zDT6CK +u29vdomaHjEkqBBNlHsmNQKLqMPIfjxHSEHMVW6PBOAnCXIrlTQMhOa1AoGACLzQ +eWtAkGoy7qdTDsCUNFqCTEtUrV1xVXb6GJWC2+xZ8uHSXZoihVyEMSvzGHoqE1gX +Rv7oeRpLDdfhwWQCb0/nixxjESS4tOKeWuZaf8lKI7WlKlelOj7AzQjyZGjlwHlQ +tCfNqx5wKubV3h8I8EgDLQon9I5Y8aniTHewP/8CgYEApo2rh+JpeGWSQrYG8d3a +n4DtmGVQe5OI70s5K4k6nHgHe5kUtz7NT5Iog71gqQCNX9CjusS6X4M16prYr7oV +yu8mC7M8DhXDZHFe76grD7PVKT0MlTTb6VVxQk1KBWIzYCVP1fMd1wQNLSHpADsX +1MsxsQ1KU6Wdom0yCMzGSdg= +-----END PRIVATE KEY----- diff --git a/3.kubernetes/jenkins/add_domian.yaml b/3.kubernetes/jenkins/add_domian.yaml new file mode 100644 index 0000000..7df0cdf --- /dev/null +++ b/3.kubernetes/jenkins/add_domian.yaml @@ -0,0 +1,17 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: hz-jenkins + namespace: jenkins +spec: + rules: + - host: hz-jenkins.pod.opsbase.cn + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: jenkins + port: + number: 8080 \ No newline at end of file diff --git a/3.kubernetes/jenkins/jenkins-all.yaml b/3.kubernetes/jenkins/jenkins-all.yaml new file mode 100644 index 0000000..182c875 --- /dev/null +++ b/3.kubernetes/jenkins/jenkins-all.yaml @@ -0,0 +1,118 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: jenkins +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: jenkins + namespace: jenkins +spec: + accessModes: + - ReadWriteOnce + storageClassName: nfs-storage + resources: + requests: + storage: 200Gi +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: jenkins + namespace: jenkins +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: 
+ name: jenkins-crb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: jenkins + namespace: jenkins +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jenkins-master + namespace: jenkins +spec: + replicas: 1 + selector: + matchLabels: + devops: jenkins-master + template: + metadata: + labels: + devops: jenkins-master + spec: + serviceAccount: jenkins #Pod 需要使用的服务账号 + initContainers: + - name: fix-permissions + image: busybox + command: ["sh", "-c", "chown -R 1000:1000 /var/jenkins_home"] + securityContext: + privileged: true + volumeMounts: + - name: jenkinshome + mountPath: /var/jenkins_home + containers: + - name: jenkins + # image: jenkinsci/blueocean:1.25.2 + image: jenkinsci/blueocean:latest + imagePullPolicy: IfNotPresent + ports: + - name: http #Jenkins Master Web 服务端口 + containerPort: 8080 + - name: slavelistener #Jenkins Master 供未来 Slave 连接的端口 + containerPort: 50000 + volumeMounts: + - name: jenkinshome + mountPath: /var/jenkins_home + env: + - name: JAVA_OPTS + value: "-Xms4096m -Xmx5120m -Duser.timezone=Asia/Shanghai -Dhudson.model.DirectoryBrowserSupport.CSP=" + volumes: + - name: jenkinshome + persistentVolumeClaim: + claimName: jenkins +--- +apiVersion: v1 +kind: Service +metadata: + name: jenkins + namespace: jenkins +spec: + ports: + - name: http + port: 8080 + targetPort: 8080 + - name: slavelistener + port: 50000 + targetPort: 50000 + type: ClusterIP + selector: + devops: jenkins-master +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: jenkins-web + namespace: jenkins +spec: + ingressClassName: nginx + rules: + - host: jenkins.pod.opsbase.cn + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: jenkins + port: + number: 8080 diff --git a/3.kubernetes/jenkins/jenkins-pvc.yaml b/3.kubernetes/jenkins/jenkins-pvc.yaml new file mode 100644 index 0000000..ca7dc3c --- /dev/null +++ 
b/3.kubernetes/jenkins/jenkins-pvc.yaml @@ -0,0 +1,12 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: jenkins + namespace: jenkins +spec: + accessModes: + - ReadWriteOnce + storageClassName: nfs-storage + resources: + requests: + storage: 200Gi diff --git a/3.kubernetes/jenkins/pipline/pipline-deploy.Jenkinsfile b/3.kubernetes/jenkins/pipline/pipline-deploy.Jenkinsfile new file mode 100644 index 0000000..4766e97 --- /dev/null +++ b/3.kubernetes/jenkins/pipline/pipline-deploy.Jenkinsfile @@ -0,0 +1,133 @@ +pipeline { + // 指定由Master还是node节点执行任务 + agent {label 'master'} + // agent {label 'jnlp-slave'} + + parameters { + branchFilter: 'origin/(.*)', + defaultValue: 'master', + name: 'GIT_BRANCH', + quickFilterEnabled: false, + selectedValue: 'NONE', + sortMode: 'NONE', + tagFilter: '*', + type: 'PT_BRANCH_TAG' + } + + options { + buildDiscarder(logRotator(numToKeepStr: '10')) + timeout(time: 10, unit: 'MINUTES') // 默认10秒超时 + } + + environment { + // PROJECT = "${JOB_BASE_NAME##*.}" + PROJECT = "${JOB_BASE_NAME}" + HARBOR_URL="harbor.opsbase.cn/public" + // 使用凭证保存钉钉接口token信息 + DINGTALK_CREDS = credentials('dingtalk') + } + stages { + stage('printenv') { + steps { + echo '打印环境变量' + sh 'printenv' + script{ + env.GIT_LOG = "" + env.BUILD_TASKS = "" + env.imageTag = "None" + } + } + } + stage('checkout') { + steps { + // 通过流水线语法自定义生成检出语句 + checkout( + [ + $class: 'GitSCM', + branches: [[name: '*/${GIT_BRANCH}']], + //branches: [[name: '*/develop']], + extensions: [ + [$class: 'CheckoutOption', timeout: 120], + [$class: 'CloneOption', depth: 1, noTags: false, reference: '', shallow: true ,timeout: 60]], + userRemoteConfigs: [[credentialsId: 'gitee', url: '${GIT_URL}']] + ] + + + ) + // checkout([$class: 'GitSCM', branches: [[name: '*/${GIT_BRANCH}']], extensions: [[$class: 'CloneOption', depth: 1, noTags: false, reference: '', shallow: true]], userRemoteConfigs: [[url: '${GIT_URL}']]]) + // updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success') + 
script{ + sh "git log --oneline -n 1 > gitlog.file" + env.GIT_LOG = readFile("gitlog.file").trim() + env.imageTag = sh (script: 'git rev-parse --short HEAD ${GIT_COMMIT}', returnStdout: true).trim() + env.BUILD_TASKS = "\n" + env.STAGE_NAME + } + // updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success') + + } + } + stage('build-image') { + steps { + retry(2) { sh 'docker build . -t ${HARBOR_URL}/${PROJECT}:${imageTag}' } + } + } + stage('push-image') { + steps { + retry(2) { sh 'docker push ${HARBOR_URL}/${PROJECT}:${imageTag}'} + // updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success') + script{ + env.BUILD_TASKS += "\t=>\t" + env.STAGE_NAME + } + } + } + stage('deploy') { + steps { + sh "sed -i 's#{{IMAGE_URL}}#${HARBOR_URL}/${PROJECT}:${imageTag}#g' manifests/*" + timeout(time: 1, unit: 'MINUTES') { + sh "kubectl apply -f manifests/" + } + // updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success') + script{ + env.BUILD_TASKS += "\t=>\t" + env.STAGE_NAME + } + } + } + + } + post { + success { + sh """ + curl -s 'https://oapi.dingtalk.com/robot/send?access_token=${DINGTALK_CREDS_PSW}' \ + -H 'Content-Type: application/json' \ + -d '{ + "msgtype": "markdown", + "markdown": { + "title":"${JOB_BASE_NAME}", + "text": "任务构建成功 😄 \n**项目名称**: ${JOB_BASE_NAME}\n \n**构建分支**: ${GIT_BRANCH}\n \n**Git log**: ${GIT_LOG}\n \n**构建任务**:\n ${BUILD_TASKS}\n \n**构建地址**: ${RUN_DISPLAY_URL}" + } + }' + """ + } + failure { + sh """ + curl -s 'https://oapi.dingtalk.com/robot/send?access_token=${DINGTALK_CREDS_PSW}' \ + -H 'Content-Type: application/json' \ + -d '{ + "msgtype": "markdown", + "markdown": { + "title":"${JOB_BASE_NAME}", + "text": "任务构建失败 ❌ \n**项目名称**: ${JOB_BASE_NAME}\n \n**构建分支**: ${GIT_BRANCH}\n \n**Git log**: ${GIT_LOG}\n \n**构建任务**:\n ${BUILD_TASKS}\n \n**构建地址**: ${RUN_DISPLAY_URL}" + } + }' + """ + } + always { + echo '执行完毕 !' 
+ } + } + + +} + + + diff --git a/3.kubernetes/jenkins/pipline/pipline-image.Jenkinsfile b/3.kubernetes/jenkins/pipline/pipline-image.Jenkinsfile new file mode 100644 index 0000000..51e74f1 --- /dev/null +++ b/3.kubernetes/jenkins/pipline/pipline-image.Jenkinsfile @@ -0,0 +1,40 @@ +pipeline { + // 指定由Master还是node节点执行任务 + agent {label 'master'} + environment { + PROJECT = 'myblog' + } + stages { + stage('printenv') { + steps { + echo '打印环境变量' + sh 'printenv' + } + } + stage('Checkout') { + steps { + // 通过流水线语法自定义生成检出语句 + checkout([$class: 'GitSCM', branches: [[name: '*/${GIT_BRANCH}']], extensions: [], userRemoteConfigs: [[url: '${GIT_URL}']]]) + } + } + stage('Build-image') { + steps { + sh 'docker build . -t myblog:latest -f Dockerfile' + } + } + stage('Send-dingtalk') { + steps { + sh """ + // 添加钉钉机器人并将请求ip加入白名单 + curl 'https://oapi.dingtalk.com/robot/send?access_token=b6d0c30412ad11a9c33debc5c2245ffe95abf234079a65a62134d531dd6befe4' \ + -H 'Content-Type: application/json' \ + -d '{"msgtype": "text", + "text": { + "content": "${JOB_BASE_NAME} 镜像构建成功!" 
+ } + }' + """ + } + } + } +} \ No newline at end of file diff --git a/3.kubernetes/jenkins/pipline/pod.Jenkinsfile b/3.kubernetes/jenkins/pipline/pod.Jenkinsfile new file mode 100644 index 0000000..fba4b92 --- /dev/null +++ b/3.kubernetes/jenkins/pipline/pod.Jenkinsfile @@ -0,0 +1,100 @@ +pipeline { + agent { label '172.21.51.68'} + + options { + buildDiscarder(logRotator(numToKeepStr: '10')) + disableConcurrentBuilds() + timeout(time: 20, unit: 'MINUTES') + gitLabConnection('gitlab') + } + + environment { + IMAGE_REPO = "172.21.51.143:5000/demo/myblog" + DINGTALK_CREDS = credentials('dingTalk') + TAB_STR = "\n \n                    " + } + + stages { + stage('printenv') { + steps { + script{ + sh "git log --oneline -n 1 > gitlog.file" + env.GIT_LOG = readFile("gitlog.file").trim() + } + sh 'printenv' + } + } + stage('checkout') { + steps { + checkout scm + updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success') + script{ + env.BUILD_TASKS = env.STAGE_NAME + "√..." + env.TAB_STR + } + } + } + stage('build-image') { + steps { + retry(2) { sh 'docker build . -t ${IMAGE_REPO}:${GIT_COMMIT}'} + updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success') + script{ + env.BUILD_TASKS += env.STAGE_NAME + "√..." + env.TAB_STR + } + } + } + stage('push-image') { + steps { + retry(2) { sh 'docker push ${IMAGE_REPO}:${GIT_COMMIT}'} + updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success') + script{ + env.BUILD_TASKS += env.STAGE_NAME + "√..." + env.TAB_STR + } + } + } + stage('deploy') { + steps { + sh "sed -i 's#{{IMAGE_URL}}#${IMAGE_REPO}:${GIT_COMMIT}#g' manifests/*" + timeout(time: 1, unit: 'MINUTES') { + sh "kubectl apply -f manifests/" + } + updateGitlabCommitStatus(name: env.STAGE_NAME, state: 'success') + script{ + env.BUILD_TASKS += env.STAGE_NAME + "√..." + env.TAB_STR + } + } + } + } + post { + success { + echo 'Congratulations!' 
+ sh """ + curl 'https://oapi.dingtalk.com/robot/send?access_token=${DINGTALK_CREDS_PSW}' \ + -H 'Content-Type: application/json' \ + -d '{ + "msgtype": "markdown", + "markdown": { + "title":"myblog", + "text": "😄👍 构建成功 👍😄 \n**项目名称**:luffy \n**Git log**: ${GIT_LOG} \n**构建分支**: ${BRANCH_NAME} \n**构建地址**:${RUN_DISPLAY_URL} \n**构建任务**:${BUILD_TASKS}" + } + }' + """ + } + failure { + echo 'Oh no!' + sh """ + curl 'https://oapi.dingtalk.com/robot/send?access_token=${DINGTALK_CREDS_PSW}' \ + -H 'Content-Type: application/json' \ + -d '{ + "msgtype": "markdown", + "markdown": { + "title":"myblog", + "text": "😖❌ 构建失败 ❌😖 \n**项目名称**:luffy \n**Git log**: ${GIT_LOG} \n**构建分支**: ${BRANCH_NAME} \n**构建地址**:${RUN_DISPLAY_URL} \n**构建任务**:${BUILD_TASKS}" + } + }' + """ + } + always { + echo 'I will always say Hello again!' + } + } +} \ No newline at end of file diff --git a/3.kubernetes/kubernetes-dashboard/dashboard-admin.yml b/3.kubernetes/kubernetes-dashboard/dashboard-admin.yml new file mode 100644 index 0000000..c5246f7 --- /dev/null +++ b/3.kubernetes/kubernetes-dashboard/dashboard-admin.yml @@ -0,0 +1,21 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: admin + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: admin + namespace: kubernetes-dashboard + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: admin + namespace: kubernetes-dashboard \ No newline at end of file diff --git a/3.kubernetes/kubernetes-dashboard/recommended.yaml b/3.kubernetes/kubernetes-dashboard/recommended.yaml new file mode 100644 index 0000000..6387754 --- /dev/null +++ b/3.kubernetes/kubernetes-dashboard/recommended.yaml @@ -0,0 +1,303 @@ +# Copyright 2017 The Kubernetes Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Namespace +metadata: + name: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + k8s-app: kubernetes-dashboard + type: NodePort + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kubernetes-dashboard +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kubernetes-dashboard +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kubernetes-dashboard +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kubernetes-dashboard + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +rules: + # Allow Dashboard to get, update and 
delete Dashboard exclusive secrets. + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics. + - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + replicas: 1 + 
revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: kubernetesui/dashboard:v2.2.0 + imagePullPolicy: Always + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kubernetes-dashboard + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. + # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard + nodeSelector: + "kubernetes.io/os": linux + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + 
template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' + spec: + containers: + - name: dashboard-metrics-scraper + image: kubernetesui/metrics-scraper:v1.0.6 + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + nodeSelector: + "kubernetes.io/os": linux + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + volumes: + - name: tmp-volume + emptyDir: {} diff --git a/3.kubernetes/kubesphere/v3.2.1/cluster-configuration.yaml b/3.kubernetes/kubesphere/v3.2.1/cluster-configuration.yaml new file mode 100644 index 0000000..5fbd1a5 --- /dev/null +++ b/3.kubernetes/kubesphere/v3.2.1/cluster-configuration.yaml @@ -0,0 +1,174 @@ +--- +apiVersion: installer.kubesphere.io/v1alpha1 +kind: ClusterConfiguration +metadata: + name: ks-installer + namespace: kubesphere-system + labels: + version: v3.2.1 +spec: + persistence: + storageClass: "" # If there is no default StorageClass in your cluster, you need to specify an existing StorageClass here. + authentication: + jwtSecret: "" # Keep the jwtSecret consistent with the Host Cluster. Retrieve the jwtSecret by executing "kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret" on the Host Cluster. + local_registry: "" # Add your private registry address if it is needed. + # dev_tag: "" # Add your kubesphere image tag you want to install, by default it's same as ks-install release version. + etcd: + monitoring: false # Enable or disable etcd monitoring dashboard installation. 
You have to create a Secret for etcd before you enable it. + endpointIps: localhost # etcd cluster EndpointIps. It can be a bunch of IPs here. + port: 2379 # etcd port. + tlsEnable: true + common: + core: + console: + enableMultiLogin: true # Enable or disable simultaneous logins. It allows different users to log in with the same account at the same time. + port: 30880 + type: NodePort + # apiserver: # Enlarge the apiserver and controller manager's resource requests and limits for the large cluster + # resources: {} + # controllerManager: + # resources: {} + redis: + enabled: false + volumeSize: 2Gi # Redis PVC size. + openldap: + enabled: false + volumeSize: 2Gi # openldap PVC size. + minio: + volumeSize: 20Gi # Minio PVC size. + monitoring: + # type: external # Whether to specify the external prometheus stack, and need to modify the endpoint at the next line. + endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 # Prometheus endpoint to get metrics data. + GPUMonitoring: # Enable or disable the GPU-related metrics. If you enable this switch but have no GPU resources, Kubesphere will set it to zero. + enabled: false + gpu: # Install GPUKinds. The default GPU kind is nvidia.com/gpu. Other GPU kinds can be added here according to your needs. + kinds: + - resourceName: "nvidia.com/gpu" + resourceType: "GPU" + default: true + es: # Storage backend for logging, events and auditing. + # master: + # volumeSize: 4Gi # The volume size of Elasticsearch master nodes. + # replicas: 1 # The total number of master nodes. Even numbers are not allowed. + # resources: {} + # data: + # volumeSize: 20Gi # The volume size of Elasticsearch data nodes. + # replicas: 1 # The total number of data nodes. + # resources: {} + logMaxAge: 7 # Log retention time in built-in Elasticsearch. It is 7 days by default. + elkPrefix: logstash # The string making up index names. The index name will be formatted as ks--log. 
+ basicAuth: + enabled: false + username: "" + password: "" + externalElasticsearchUrl: "" + externalElasticsearchPort: "" + alerting: # (CPU: 0.1 Core, Memory: 100 MiB) It enables users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from. + enabled: false # Enable or disable the KubeSphere Alerting System. + # thanosruler: + # replicas: 1 + # resources: {} + auditing: # Provide a security-relevant chronological set of records,recording the sequence of activities happening on the platform, initiated by different tenants. + enabled: false # Enable or disable the KubeSphere Auditing Log System. + # operator: + # resources: {} + # webhook: + # resources: {} + devops: # (CPU: 0.47 Core, Memory: 8.6 G) Provide an out-of-the-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image. + enabled: false # Enable or disable the KubeSphere DevOps System. + # resources: {} + jenkinsMemoryLim: 2Gi # Jenkins memory limit. + jenkinsMemoryReq: 1500Mi # Jenkins memory request. + jenkinsVolumeSize: 8Gi # Jenkins volume size. + jenkinsJavaOpts_Xms: 512m # The following three fields are JVM parameters. + jenkinsJavaOpts_Xmx: 512m + jenkinsJavaOpts_MaxRAM: 2g + events: # Provide a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters. + enabled: false # Enable or disable the KubeSphere Events System. + # operator: + # resources: {} + # exporter: + # resources: {} + # ruler: + # enabled: true + # replicas: 2 + # resources: {} + logging: # (CPU: 57 m, Memory: 2.76 G) Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd. + enabled: false # Enable or disable the KubeSphere Logging System. 
+ containerruntime: docker + logsidecar: + enabled: true + replicas: 2 + # resources: {} + metrics_server: # (CPU: 56 m, Memory: 44.35 MiB) It enables HPA (Horizontal Pod Autoscaler). + enabled: false # Enable or disable metrics-server. + monitoring: + storageClass: "" # If there is an independent StorageClass you need for Prometheus, you can specify it here. The default StorageClass is used by default. + # kube_rbac_proxy: + # resources: {} + # kube_state_metrics: + # resources: {} + # prometheus: + # replicas: 1 # Prometheus replicas are responsible for monitoring different segments of data source and providing high availability. + # volumeSize: 20Gi # Prometheus PVC size. + # resources: {} + # operator: + # resources: {} + # adapter: + # resources: {} + # node_exporter: + # resources: {} + # alertmanager: + # replicas: 1 # AlertManager Replicas. + # resources: {} + # notification_manager: + # resources: {} + # operator: + # resources: {} + # proxy: + # resources: {} + gpu: # GPU monitoring-related plug-in installation. + nvidia_dcgm_exporter: # Ensure that gpu resources on your hosts can be used normally, otherwise this plug-in will not work properly. + enabled: false # Check whether the labels on the GPU hosts contain "nvidia.com/gpu.present=true" to ensure that the DCGM pod is scheduled to these nodes. + # resources: {} + multicluster: + clusterRole: none # host | member | none # You can install a solo cluster, or specify it as the Host or Member Cluster. + network: + networkpolicy: # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods). + # Make sure that the CNI network plugin used by the cluster supports NetworkPolicy. There are a number of CNI network plugins that support NetworkPolicy, including Calico, Cilium, Kube-router, Romana and Weave Net. + enabled: false # Enable or disable network policies. + ippool: # Use Pod IP Pools to manage the Pod network address space. 
Pods to be created can be assigned IP addresses from a Pod IP Pool. + type: none # Specify "calico" for this field if Calico is used as your CNI plugin. "none" means that Pod IP Pools are disabled. + topology: # Use Service Topology to view Service-to-Service communication based on Weave Scope. + type: none # Specify "weave-scope" for this field to enable Service Topology. "none" means that Service Topology is disabled. + openpitrix: # An App Store that is accessible to all platform tenants. You can use it to manage apps across their entire lifecycle. + store: + enabled: false # Enable or disable the KubeSphere App Store. + servicemesh: # (0.3 Core, 300 MiB) Provide fine-grained traffic management, observability and tracing, and visualized traffic topology. + enabled: false # Base component (pilot). Enable or disable KubeSphere Service Mesh (Istio-based). + kubeedge: # Add edge nodes to your cluster and deploy workloads on edge nodes. + enabled: false # Enable or disable KubeEdge. + cloudCore: + nodeSelector: {"node-role.kubernetes.io/worker": ""} + tolerations: [] + cloudhubPort: "10000" + cloudhubQuicPort: "10001" + cloudhubHttpsPort: "10002" + cloudstreamPort: "10003" + tunnelPort: "10004" + cloudHub: + advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided. + - "" # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided. 
+ nodeLimit: "100" + service: + cloudhubNodePort: "30000" + cloudhubQuicNodePort: "30001" + cloudhubHttpsNodePort: "30002" + cloudstreamNodePort: "30003" + tunnelNodePort: "30004" + edgeWatcher: + nodeSelector: {"node-role.kubernetes.io/worker": ""} + tolerations: [] + edgeWatcherAgent: + nodeSelector: {"node-role.kubernetes.io/worker": ""} + tolerations: [] diff --git a/3.kubernetes/kubesphere/v3.2.1/kubesphere-installer.yaml b/3.kubernetes/kubesphere/v3.2.1/kubesphere-installer.yaml new file mode 100644 index 0000000..536cc32 --- /dev/null +++ b/3.kubernetes/kubesphere/v3.2.1/kubesphere-installer.yaml @@ -0,0 +1,307 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterconfigurations.installer.kubesphere.io +spec: + group: installer.kubesphere.io + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + scope: Namespaced + names: + plural: clusterconfigurations + singular: clusterconfiguration + kind: ClusterConfiguration + shortNames: + - cc + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kubesphere-system + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ks-installer + namespace: kubesphere-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ks-installer +rules: +- apiGroups: + - "" + resources: + - '*' + verbs: + - '*' +- apiGroups: + - apps + resources: + - '*' + verbs: + - '*' +- apiGroups: + - extensions + resources: + - '*' + verbs: + - '*' +- apiGroups: + - batch + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - apiregistration.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - '*' + verbs: + - '*' +- 
apiGroups: + - tenant.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - certificates.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - devops.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - '*' + verbs: + - '*' +- apiGroups: + - logging.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - jaegertracing.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - storage.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - admissionregistration.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - policy + resources: + - '*' + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - iam.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - notification.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - auditing.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - events.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - core.kubefed.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - installer.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - storage.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - monitoring.kiali.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - kiali.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - kubeedge.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - types.kubefed.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - monitoring.kubesphere.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - application.kubesphere.io 
+ resources: + - '*' + verbs: + - '*' + + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ks-installer +subjects: +- kind: ServiceAccount + name: ks-installer + namespace: kubesphere-system +roleRef: + kind: ClusterRole + name: ks-installer + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ks-installer + namespace: kubesphere-system + labels: + app: ks-install +spec: + replicas: 1 + selector: + matchLabels: + app: ks-install + template: + metadata: + labels: + app: ks-install + spec: + serviceAccountName: ks-installer + containers: + - name: installer + image: kubesphere/ks-installer:v3.2.1 + imagePullPolicy: "Always" + resources: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: 20m + memory: 100Mi + volumeMounts: + - mountPath: /etc/localtime + name: host-time + readOnly: true + volumes: + - hostPath: + path: /etc/localtime + type: "" + name: host-time diff --git a/3.kubernetes/kubesphere/v3.2.1/svc.yml b/3.kubernetes/kubesphere/v3.2.1/svc.yml new file mode 100644 index 0000000..e3baeae --- /dev/null +++ b/3.kubernetes/kubesphere/v3.2.1/svc.yml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: ks-install-svc + namespace: test +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8000 + selector: + app: ks-install + type: ClusterIP diff --git a/3.kubernetes/limitRange/cpu-limit.yml b/3.kubernetes/limitRange/cpu-limit.yml new file mode 100644 index 0000000..8a1818a --- /dev/null +++ b/3.kubernetes/limitRange/cpu-limit.yml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: cpu-limit-range + namespace: test +spec: + limits: + - default: + cpu: 200m + defaultRequest: + cpu: 100m + type: Container \ No newline at end of file diff --git a/3.kubernetes/limitRange/mem-limit.yml b/3.kubernetes/limitRange/mem-limit.yml new file mode 100644 index 0000000..8608a8a --- /dev/null +++ b/3.kubernetes/limitRange/mem-limit.yml @@ -0,0 +1,12 
@@ +apiVersion: v1 +kind: LimitRange +metadata: + name: mem-limit-range + namespace: test +spec: + limits: + - default: + memory: 1024Mi + defaultRequest: + memory: 512Mi + type: Container \ No newline at end of file diff --git a/3.kubernetes/one-pod/pod-with-volume.yml b/3.kubernetes/one-pod/pod-with-volume.yml new file mode 100644 index 0000000..400dd77 --- /dev/null +++ b/3.kubernetes/one-pod/pod-with-volume.yml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Pod +metadata: + name: cmdb + namespace: test + labels: + component: cmdb +spec: + volumes: + - name: mysql-data + hostPath: + path: /opt/mysql/data + nodeSelector: # 使用节点选择器将Pod调度到指定label的节点 + component: mysql + containers: + - name: cmdb + image: harbor.opsbase.cn/public/cmdb:latest + env: + - name: MYSQL_HOST # 指定root用户的用户名 + value: "127.0.0.1" + - name: MYSQL_PASSWD + value: "123456" + ports: + - containerPort: 8000 + - name: mysql + image: mysql:5.7 + args: + - --character-set-server=utf8mb4 + - --collation-server=utf8mb4_unicode_ci + ports: + - containerPort: 3306 + env: + - name: MYSQL_ROOT_PASSWORD + value: "123456" + - name: MYSQL_DATABASE + value: "cmdb" + volumeMounts: + - name: mysql-data + mountPath: /var/lib/mysql diff --git a/3.kubernetes/one-pod/sshd-pod.yml b/3.kubernetes/one-pod/sshd-pod.yml new file mode 100644 index 0000000..5a0c3a4 --- /dev/null +++ b/3.kubernetes/one-pod/sshd-pod.yml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sshd-demo + namespace: test +spec: + nodeSelector: # 使用节点选择器将Pod调度到指定label的节点 + standalone: "true" + containers: + - name: sshd-demo + image: lghost/sshd:v0.1 + ports: + - containerPort: 22 \ No newline at end of file diff --git a/3.kubernetes/pod-lifecycle/pod-lifecycle.yml b/3.kubernetes/pod-lifecycle/pod-lifecycle.yml new file mode 100644 index 0000000..d9bb16c --- /dev/null +++ b/3.kubernetes/pod-lifecycle/pod-lifecycle.yml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Pod +metadata: + name: pod-lifecycle + namespace: test + labels: + component: 
pod-lifecycless +spec: + initContainers: + - name: init + image: busybox + command: ['sh', '-c', 'echo $(date +%s): INIT >> /loap/timing'] + volumeMounts: + - mountPath: /loap + name: timing + containers: + - name: main + image: busybox + command: ['sh', '-c', 'echo $(date +%s): START >> /loap/timing; +sleep 10; echo $(date +%s): END >> /loap/timing;'] + volumeMounts: + - mountPath: /loap + name: timing + livenessProbe: + exec: + command: ['sh', '-c', 'echo $(date +%s): LIVENESS >> /loap/timing'] + readinessProbe: + exec: + command: ['sh', '-c', 'echo $(date +%s): READINESS >> /loap/timing'] + lifecycle: + postStart: + exec: + command: ['sh', '-c', 'echo $(date +%s): POST-START >> /loap/timing'] + preStop: + exec: + command: ['sh', '-c', 'echo $(date +%s): PRE-STOP >> /loap/timing'] + volumes: + - name: timing + hostPath: + path: /tmp/loap diff --git a/3.kubernetes/rbac/demo-admin-rbac.yaml b/3.kubernetes/rbac/demo-admin-rbac.yaml new file mode 100644 index 0000000..1fba9ec --- /dev/null +++ b/3.kubernetes/rbac/demo-admin-rbac.yaml @@ -0,0 +1,30 @@ +# 针对demo 命名空间授权 +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pre-admin + namespace: demo +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: demo + name: pods-reader-writer +rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["*"] + verbs: ["*"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pods-reader-writer + namespace: demo +subjects: + - kind: ServiceAccount #这里可以是User,Group,ServiceAccount + name: demo-pods-admin + namespace: demo +roleRef: + kind: Role #这里可以是Role或者ClusterRole,若是ClusterRole,则权限也仅限于rolebinding的内部 + name: pods-reader-writer + apiGroup: rbac.authorization.k8s.io diff --git a/3.kubernetes/rbac/demo-limit.yml b/3.kubernetes/rbac/demo-limit.yml new file mode 100644 index 0000000..5564222 --- /dev/null +++ b/3.kubernetes/rbac/demo-limit.yml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: LimitRange +metadata: + 
name: mem-limit-range + namespace: demo +spec: + limits: + - default: + memory: 512Mi + defaultRequest: + memory: 256Mi + type: Container +--- +apiVersion: v1 +kind: Pod +metadata: + name: default-mem-demo + namespace: demo +spec: + containers: + - name: default-mem-demo + image: nginx:alpine diff --git a/3.kubernetes/secret/cmdb-secret.yml b/3.kubernetes/secret/cmdb-secret.yml new file mode 100644 index 0000000..bdd4d0a --- /dev/null +++ b/3.kubernetes/secret/cmdb-secret.yml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: mysql-cmdb + namespace: test +type: Opaque +data: + DB_USER: cm9vdA== + DB_PASSWORD: MTIzNDU2 + +# 注意加-n参数,echo -n 123456|base64 \ No newline at end of file diff --git a/3.kubernetes/secret/secret.yml b/3.kubernetes/secret/secret.yml new file mode 100644 index 0000000..7ac3a46 --- /dev/null +++ b/3.kubernetes/secret/secret.yml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: nacos +type: Opaque +data: + nacosPwd: elhJd2FhVGk0N1Bq + +# 注意加-n参数,echo -n 123456|base64 \ No newline at end of file diff --git a/3.kubernetes/services/svc-cmdb.yml b/3.kubernetes/services/svc-cmdb.yml new file mode 100644 index 0000000..8ddd121 --- /dev/null +++ b/3.kubernetes/services/svc-cmdb.yml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: cmdb-svc + namespace: test +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 8000 + selector: + app: cmdb + type: ClusterIP \ No newline at end of file diff --git a/3.kubernetes/services/svc-myblog.yml b/3.kubernetes/services/svc-myblog.yml new file mode 100644 index 0000000..268a526 --- /dev/null +++ b/3.kubernetes/services/svc-myblog.yml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: myblog + namespace: test +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: myblog + type: ClusterIP \ No newline at end of file diff --git a/3.kubernetes/storage/nfs-pv.yml b/3.kubernetes/storage/nfs-pv.yml new file mode 100644 index 
0000000..835bc4d --- /dev/null +++ b/3.kubernetes/storage/nfs-pv.yml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: nfs-pv + namespace: test +spec: + capacity: + storage: 50Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + nfs: + path: /opt/data/nfs + server: 66.94.121.23 \ No newline at end of file diff --git a/3.kubernetes/storage/nfs-pvc.yml b/3.kubernetes/storage/nfs-pvc.yml new file mode 100644 index 0000000..a9f0661 --- /dev/null +++ b/3.kubernetes/storage/nfs-pvc.yml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nfs-pvc + namespace: test +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 2Gi \ No newline at end of file diff --git a/3.kubernetes/storage/nginx-nfs.yml b/3.kubernetes/storage/nginx-nfs.yml new file mode 100644 index 0000000..59231bc --- /dev/null +++ b/3.kubernetes/storage/nginx-nfs.yml @@ -0,0 +1,30 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: # metadata是该资源的元数据,name是必须的元数据项 + name: nginx-nfs-test + namespace: test # 指定命名空间 + +spec: # spec部分是该Deployment的规则说明 + replicas: 2 + selector: + matchLabels: + app: nginx + template: # template定义Pod的模板,这是配置的重要部分 + metadata: # 定义Pod的元数据,至少要顶一个label,label的key和value可以任意指定 + labels: + app: nginx + spec: # spec描述的是Pod的规则,此部分定义pod中每一个容器的属性,name和image是必需的 + containers: + - name: nginx + image: nginx:alpine # 镜像;openresty/openresty or nginx:alpine + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: # 挂载容器中的目录到pvc nfs中的目录 + - name: www + mountPath: /usr/share/nginx/html + volumes: + - name: www + persistentVolumeClaim: # 指定pvc资源 + claimName: nfs-pvc + \ No newline at end of file diff --git a/3.kubernetes/storageClass/nfs-nginx-nginx-pvc.yml b/3.kubernetes/storageClass/nfs-nginx-nginx-pvc.yml new file mode 100644 index 0000000..81d234d --- /dev/null +++ b/3.kubernetes/storageClass/nfs-nginx-nginx-pvc.yml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment 
+metadata: + name: nginx-deployment-pvc +spec: + replicas: 2 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + volumeMounts: + - name: wwwroot + mountPath: /usr/share/nginx/html + ports: + - containerPort: 80 + volumes: + - name: wwwroot + persistentVolumeClaim: + claimName: nginx-pvc + +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nginx-pvc + # annotations: # 注释使用默认 + # volume.beta.kubernetes.io/storage-class: "nfs-storage" +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 5Gi + +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-svc +spec: + selector: + app: nginx + ports: + - name: http80 + port: 80 + protocol: TCP + targetPort: 80 + type: ClusterIP diff --git a/3.kubernetes/storageClass/nfs-nginx-pvc-statefulset.yml b/3.kubernetes/storageClass/nfs-nginx-pvc-statefulset.yml new file mode 100644 index 0000000..04138c6 --- /dev/null +++ b/3.kubernetes/storageClass/nfs-nginx-pvc-statefulset.yml @@ -0,0 +1,49 @@ +apiVersion: apps/v1 +kind: StatefulSet # 每个pod申请一个独立的pvc资源 +metadata: + name: nginx-nfs-deployment + labels: + app: nginx-nfs-deployment +spec: + replicas: 1 + serviceName: nginx-svc + template: + metadata: + name: nginx-nfs-deployment + labels: + app: nginx-nfs-deployment + spec: + containers: + - name: nginx-nfs-deployment + image: bitnami/nginx:1.23-debian-11 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + volumeMounts: + - mountPath: /usr/share/nginx/html/ + name: nginxvolume + restartPolicy: Always + volumeClaimTemplates: + - metadata: + name: nginxvolume + annotations: + volume.beta.kubernetes.io/storage-class: "nfs-storage" + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + selector: + matchLabels: + app: nginx-nfs-deployment +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx-svc +spec: + selector: + app: nginx-nfs-deployment + ports: + - port: 
80 diff --git a/3.kubernetes/storageClass/nfs-provisioner.yml b/3.kubernetes/storageClass/nfs-provisioner.yml new file mode 100644 index 0000000..0260053 --- /dev/null +++ b/3.kubernetes/storageClass/nfs-provisioner.yml @@ -0,0 +1,114 @@ +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" # 是否设置为默认sc + name: nfs-storage +provisioner: nfs-provisioner +volumeBindingMode: Immediate +reclaimPolicy: Delete + +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: nfs-client-provisioner +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-client-provisioner + strategy: + type: Recreate + template: + metadata: + labels: + app: nfs-client-provisioner + spec: + serviceAccountName: nfs-client-provisioner + containers: + - name: nfs-client-provisioner + image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0 # quay.io/external_storage/nfs-client-provisioner:latest + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: nfs-provisioner # 和Storage中provisioner保持一致便可 + - name: NFS_SERVER + value: 66.94.121.23 # nfs服务器地址 + - name: NFS_PATH + value: /opt/data/nfs # 共享存储目录 + volumes: + - name: nfs-client-root + nfs: + server: 66.94.121.23 + path: /opt/data/nfs + +--- # rbac授权 +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-client-provisioner + namespace: default +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nfs-client-provisioner-runner +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] 
+ resources: ["events"] + verbs: ["create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + + namespace: default +roleRef: + kind: ClusterRole + name: nfs-client-provisioner-runner + apiGroup: rbac.authorization.k8s.io +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + namespace: default +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: leader-locking-nfs-client-provisioner + namespace: default +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + + namespace: default +roleRef: + kind: Role + name: leader-locking-nfs-client-provisioner + apiGroup: rbac.authorization.k8s.io diff --git a/3.kubernetes/storageClass/nfs-pvc-1g-test.yml b/3.kubernetes/storageClass/nfs-pvc-1g-test.yml new file mode 100644 index 0000000..382be69 --- /dev/null +++ b/3.kubernetes/storageClass/nfs-pvc-1g-test.yml @@ -0,0 +1,13 @@ +kind: PersistentVolumeClaim # 测试pvc申请 +apiVersion: v1 +metadata: + name: test-nfs-pvc + annotations: + volume.beta.kubernetes.io/storage-class: "nfs-storage" +spec: + storageClassName: "nfs-storage" + accessModes: + - ReadWriteMany # 多读写 + resources: + requests: + storage: 1Gi diff --git a/3.kubernetes/two-pod/cmdb.yml b/3.kubernetes/two-pod/cmdb.yml new file mode 100644 index 0000000..cffdb80 --- /dev/null +++ b/3.kubernetes/two-pod/cmdb.yml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + name: cmdb + namespace: test + labels: + component: cmdb +spec: + containers: + - name: cmdb + image: harbor.opsbase.cn/public/cmdb:latest + imagePullPolicy: IfNotPresent + env: + - name: DB_HOST # 指定root用户的用户名 + value: "66.94.125.73" + - name: DB_PASSWORD + value: "123456" + ports: + 
- containerPort: 8000 + resources: + requests: + memory: 512Mi + cpu: 50m + limits: + memory: 2000Mi + cpu: 100m + livenessProbe: + httpGet: + path: /prometheus/metrics + port: 8000 + scheme: HTTP + initialDelaySeconds: 60 # 容器启动后第一次执行探测是需要等待多少秒 + periodSeconds: 15 # 执行探测的频率 + timeoutSeconds: 2 # 探测超时时间 + readinessProbe: + httpGet: + path: /prometheus/metrics + port: 8000 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 2 + periodSeconds: 15 \ No newline at end of file diff --git a/3.kubernetes/two-pod/myblog.yml b/3.kubernetes/two-pod/myblog.yml new file mode 100644 index 0000000..d433ce7 --- /dev/null +++ b/3.kubernetes/two-pod/myblog.yml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + name: myblog + namespace: test + labels: + component: myblog +spec: + containers: + - name: myblog + image: harbor.opsbase.cn/public/myblog:v1 + imagePullPolicy: IfNotPresent + env: + - name: MYSQL_HOST # 指定root用户的用户名 + value: "66.94.125.73" + - name: MYSQL_PASSWD + value: "123456" + ports: + - containerPort: 8002 + resources: + requests: + memory: 100Mi + cpu: 50m + limits: + memory: 500Mi + cpu: 100m + livenessProbe: + httpGet: + path: /blog/index/ + port: 8002 + scheme: HTTP + initialDelaySeconds: 10 # 容器启动后第一次执行探测是需要等待多少秒 + periodSeconds: 15 # 执行探测的频率 + timeoutSeconds: 2 # 探测超时时间 + readinessProbe: + httpGet: + path: /blog/index/ + port: 8002 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 2 + periodSeconds: 15 \ No newline at end of file diff --git a/3.kubernetes/two-pod/mysql.yml b/3.kubernetes/two-pod/mysql.yml new file mode 100644 index 0000000..16a7ad1 --- /dev/null +++ b/3.kubernetes/two-pod/mysql.yml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: Pod +metadata: + name: mysql + namespace: test + labels: + component: mysql +spec: + hostNetwork: true # 声明pod的网络模式为host模式,效果同docker run --net=host + volumes: + - name: mysql-data + hostPath: + path: /opt/mysql/data + nodeSelector: # 使用节点选择器将Pod调度到指定label的节点 + component: mysql + containers: + - name: mysql 
+ image: mysql:5.7 + args: + - --character-set-server=utf8mb4 + - --collation-server=utf8mb4_unicode_ci + ports: + - containerPort: 3306 + env: + - name: MYSQL_ROOT_PASSWORD + value: "opsbase.cn" + - name: MYSQL_DATABASE + value: "cmdb" + resources: + requests: + memory: 2000Mi + cpu: 800m + limits: + memory: 4Gi + cpu: 2 + readinessProbe: + tcpSocket: + port: 3306 + initialDelaySeconds: 5 + periodSeconds: 10 + livenessProbe: + tcpSocket: + port: 3306 + initialDelaySeconds: 15 + periodSeconds: 20 + volumeMounts: + - name: mysql-data + mountPath: /var/lib/mysql \ No newline at end of file diff --git a/3.kubernetes/two-pod/test.md b/3.kubernetes/two-pod/test.md new file mode 100644 index 0000000..e69de29 diff --git a/4.monitor/consul.yml b/4.monitor/consul.yml new file mode 100644 index 0000000..f928295 --- /dev/null +++ b/4.monitor/consul.yml @@ -0,0 +1,44 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: consul-data-pvc + namespace: monitor +spec: + accessModes: + - ReadWriteMany + storageClassName: nfs-provisioner # 您需要提前定义的NFS存储类 + resources: + requests: + storage: 10Gi # 根据您的存储需求进行调整 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul + namespace: monitor +spec: + replicas: 1 # 根据您的需求进行调整 + selector: + matchLabels: + app: consul + template: + metadata: + labels: + app: consul + spec: + containers: + - name: consul + image: consul:1.15 + ports: + - containerPort: 8500 + volumeMounts: + - name: consul-data + mountPath: /consul/data + volumes: + - name: consul-data + persistentVolumeClaim: + claimName: consul-data-pvc + # namespace: monitor + diff --git a/4.monitor/grafana/granfna.yml b/4.monitor/grafana/granfna.yml new file mode 100644 index 0000000..ab18091 --- /dev/null +++ b/4.monitor/grafana/granfna.yml @@ -0,0 +1,66 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + app: grafana + name: grafana + namespace: monitor +spec: + replicas: 1 + selector: + matchLabels: + app: grafana + template: + metadata: + 
labels: + app: grafana + spec: + nodeSelector: + node-type: grafana + securityContext: + runAsNonRoot: true + runAsUser: 10555 + fsGroup: 10555 + containers: + - name: grafana + image: grafana/grafana:latest + imagePullPolicy: IfNotPresent + env: + - name: GF_AUTH_BASIC_ENABLED + value: "true" + - name: GF_AUTH_ANONYMOUS_ENABLED + value: "false" + readinessProbe: + httpGet: + path: /login + port: 3000 + volumeMounts: + - mountPath: /var/lib/grafana + name: monitor-data + ports: + - containerPort: 3000 + protocol: TCP + volumes: + - name: monitor-data + persistentVolumeClaim: + claimName: grafana-data-pvc + + # emptyDir: {} + + # hostPath: + # path: /data/grafana + # type: DirectoryOrCreate +--- +kind: Service +apiVersion: v1 +metadata: + labels: + app: grafana + name: grafana-service + namespace: monitor +spec: + ports: + - port: 3000 + targetPort: 3000 + selector: + app: grafana \ No newline at end of file diff --git a/4.monitor/grafana/readme.md b/4.monitor/grafana/readme.md new file mode 100644 index 0000000..7292f7e --- /dev/null +++ b/4.monitor/grafana/readme.md @@ -0,0 +1,15 @@ +# 命令 + +```bash +# 指定lab +kubectl label nodes k8s-1 node-type=grafana +``` + +修改 Deployment 配置,添加 nodeSelector 字段: +``` +spec: + template: + spec: + nodeSelector: + node-type: grafana +``` \ No newline at end of file diff --git a/4.monitor/node-exporter.yml b/4.monitor/node-exporter.yml new file mode 100644 index 0000000..84b643b --- /dev/null +++ b/4.monitor/node-exporter.yml @@ -0,0 +1,42 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-exporter + namespace: monitor + labels: + app: node-exporter +spec: + selector: + matchLabels: + app: node-exporter + template: + metadata: + labels: + app: node-exporter + spec: + containers: + - name: node-exporter + image: prom/node-exporter + ports: + - containerPort: 9100 + name: metrics + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + - name: root + 
mountPath: /rootfs + readOnly: true + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + - name: root + hostPath: + path: / \ No newline at end of file diff --git a/4.monitor/prometheus/1.rbac.yml b/4.monitor/prometheus/1.rbac.yml new file mode 100644 index 0000000..e499855 --- /dev/null +++ b/4.monitor/prometheus/1.rbac.yml @@ -0,0 +1,55 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus + namespace: monitor +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus +rules: +- apiGroups: + - "" + resources: + - nodes + - services + - endpoints + - pods + - nodes/proxy + verbs: + - get + - list + - watch +- apiGroups: + - "extensions" + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + - nodes/metrics + verbs: + - get +- nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +subjects: +- kind: ServiceAccount + name: prometheus + namespace: monitor diff --git a/4.monitor/prometheus/2.configmap.yml b/4.monitor/prometheus/2.configmap.yml new file mode 100644 index 0000000..32cb637 --- /dev/null +++ b/4.monitor/prometheus/2.configmap.yml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-config + namespace: monitor +data: + prometheus.yml: | + global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. 
+ alerting: + alertmanagers: + - static_configs: + - targets: + - 10.10.10.160:9093 + rule_files: + - "/etc/prometheus/rules.yml" + scrape_configs: + - job_name: "prometheus" + static_configs: + - targets: ["localhost:9090"] + - job_name: "alertmanager" + static_configs: + - targets: ["10.10.10.160:9093"] + - job_name: 'consul-app-prometheus' + metrics_path: "/actuator/prometheus" + consul_sd_configs: # 配置基于consul的服务发现 + - server: 172.16.5.37:8500 # consul + refresh_interval: 10s # 刷新间隔 + services: ['application'] + + relabel_configs: # 把__meta_consul_service_id 映射主机名 + - source_labels: [__meta_consul_service_id] + separator: ; + regex: (.*) + target_label: 'instance' + replacement: $1 + action: replace + - source_labels: [__meta_consul_service_address] #映射主机IP + separator: ; + regex: (.*) + target_label: 'ip' + replacement: $1 + action: replace + - source_labels: [__meta_consul_service] + target_label: 'service' + + rules.yml: | + groups: + - name: jvm告警 + rules: + - alert: 频繁FullGC + expr: increase(jvm_gc_pause_seconds_sum[5m]) > 1 + for: 1m + labels: + severity: 警告 + annotations: + summary: "检测到频繁的 Full GC" + description: "{{$labels.instance}} 正在经历频繁的 Full GC 事件" \ No newline at end of file diff --git a/4.monitor/prometheus/3.deploy.yml b/4.monitor/prometheus/3.deploy.yml new file mode 100644 index 0000000..f934e52 --- /dev/null +++ b/4.monitor/prometheus/3.deploy.yml @@ -0,0 +1,50 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: prometheus + namespace: monitor + labels: + app: prometheus +spec: + selector: + matchLabels: + app: prometheus + template: + metadata: + labels: + app: prometheus + spec: + securityContext: #指定运行的用户为root + runAsUser: 0 + serviceAccountName: prometheus + containers: + - image: prom/prometheus:v2.30.2 + name: prometheus + args: + - "--config.file=/etc/prometheus/prometheus.yml" #通过volume挂载prometheus.yml + - "--storage.tsdb.path=/prometheus" #通过vlolume挂载目录/prometheus + - "--storage.tsdb.retention.time=24h" + - 
"--web.enable-admin-api" #控制对admin HTTP API的访问,其中包括删除时间序列等功能 + - "--web.enable-lifecycle" #支持热更新,直接执行localhost:9090/-/reload立即生效 + ports: + - containerPort: 9090 + name: http + volumeMounts: + - mountPath: "/etc/prometheus" + name: config-volume + - mountPath: "/prometheus" + name: data + resources: + limits: + cpu: "0.5" + memory: 2Gi + requests: + cpu: 100m + memory: 1Gi + volumes: + - name: data + persistentVolumeClaim: + claimName: prometheus-data-pvc #本地存储 + - name: config-volume + configMap: + name: prometheus-config \ No newline at end of file diff --git a/4.monitor/prometheus/consul.py b/4.monitor/prometheus/consul.py new file mode 100644 index 0000000..fed84bc --- /dev/null +++ b/4.monitor/prometheus/consul.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +import requests +import subprocess + +# 获取 node-exporter 的 Pod 名称和节点名称 +pod_info_bytes = subprocess.check_output("kubectl -n monitor get po -owide -l app=node-exporter --no-headers", shell=True) +# 将二进制数据转换为字符串并按行分割 +# pod_info = pod_info_bytes.strip().split('\n') +pod_info = pod_info_bytes.decode('utf-8').strip().split('\n') +# 提取 Pod 名称和节点名称 +pod_names = [info.split()[0] for info in pod_info] +node_ips = [info.split()[5] for info in pod_info] +node_names = [info.split()[6] for info in pod_info] + +# 确保名称数量一致 +if len(pod_names) != len(node_names): + print("Error: 节点数量与 Pod 数量不匹配.") + exit(1) + + + +# 定义 Consul agent 的端点 +consul_endpoint = "http://consul.opsx.top/v1/agent/service/register" + +# 遍历每个 Pod 并将服务注册到 Consul +for pod_name,node_ip, node_name in zip(pod_names, node_ips,node_names): + # print("pod: {} ip:{} node: {}".format(pod_name, node_ip,node_name)) + # 定义服务注册的数据 + data = { + "id": f"{node_name}-{pod_name}", + "name": "node-exporter", + "address": f"{node_ip}", + "port": 9100, + "checks": [{ + "http": f"http://{node_ip}:9100/metrics", + "interval": "5s" + }] + } + print(data) + + # 发送 PUT 请求以注册服务 + response = requests.put(consul_endpoint, json=data) + + # 检查响应状态 + if response.status_code == 200: + 
print(f"服务 {pod_name} 注册成功.") + else: + print(f"无法注册服务 {pod_name}. 状态码: {response.status_code}") + print(response.text) \ No newline at end of file diff --git a/4.monitor/prometheus/consul_delete_all.py b/4.monitor/prometheus/consul_delete_all.py new file mode 100644 index 0000000..b5c1030 --- /dev/null +++ b/4.monitor/prometheus/consul_delete_all.py @@ -0,0 +1,38 @@ +# 清理失效实例 +import requests + +def deregister_failed_instances(consul_url): + response = requests.get(consul_url) + if response.status_code == 200: + instances = response.json() + for instance in instances: + service_id = instance['ServiceID'] + if service_id: + requests.put(f"http://consul.opsx.top/v1/agent/service/deregister/{service_id}") + print(f"失效实例ID: {service_id}") + else: + print(f"无法从 Consul API 获取数据。状态码:{response.status_code}") + +consul_url = "http://consul.opsx.top/v1/health/state/any" +deregister_failed_instances(consul_url) + + + + + +# curl -X PUT -d '{ +# "id": "host-122", +# "name": "node-exporter", +# "address": "192.168.0.120", +# "port": 9100, +# "tags": ["linux"], +# "meta": { +# "group": "kong", +# "environment": "Pro", +# "project": "API_Platform" +# }, +# "checks": [ { +# "http": "http://192.168.0.120:9100/metrics", +# "interval": "5s" +# }]}' \ +# http://127.0.0.1:8500/v1/agent/service/register \ No newline at end of file diff --git a/4.monitor/prometheus/consul_failed_clean.py b/4.monitor/prometheus/consul_failed_clean.py new file mode 100644 index 0000000..80c225f --- /dev/null +++ b/4.monitor/prometheus/consul_failed_clean.py @@ -0,0 +1,38 @@ +# 清理失效实例 +import requests + +def deregister_failed_instances(consul_url): + response = requests.get(consul_url) + if response.status_code == 200: + instances = response.json() + for instance in instances: + if instance['Status'] == 'critical': # 如果实例状态为严重 + service_id = instance['ServiceID'] + requests.put(f"http://consul.opsx.top/v1/agent/service/deregister/{service_id}") + print(f"失效实例ID: {service_id}") + else: + print(f"无法从 Consul 
API 获取数据。状态码:{response.status_code}") + +consul_url = "http://consul.opsx.top/v1/health/state/critical" +deregister_failed_instances(consul_url) + + + + + +# curl -X PUT -d '{ +# "id": "host-122", +# "name": "node-exporter", +# "address": "192.168.0.120", +# "port": 9100, +# "tags": ["linux"], +# "meta": { +# "group": "kong", +# "environment": "Pro", +# "project": "API_Platform" +# }, +# "checks": [ { +# "http": "http://192.168.0.120:9100/metrics", +# "interval": "5s" +# }]}' \ +# http://127.0.0.1:8500/v1/agent/service/register \ No newline at end of file diff --git a/4.monitor/pvc.yml b/4.monitor/pvc.yml new file mode 100644 index 0000000..e69de29 diff --git a/4.monitor/readme.md b/4.monitor/readme.md new file mode 100644 index 0000000..11ae438 --- /dev/null +++ b/4.monitor/readme.md @@ -0,0 +1,9 @@ + +# monitor diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..0e4d9f3 --- /dev/null +++ b/README.md @@ -0,0 +1,12 @@ +# Opsbase.cn_Full_container_scheme + +- 基础环境和组件部署 +- kubernets 基础 +- docker-compose all in one (容器化一键部署开发环境) +- 基于全套阿里云微服务框架serverless实践 +- 基于AWS EKS Fargate 实践 +- 通用CI/CD 实践 +- 基于飞书&Lark 审批流部署 +- 脚本&工具链开发 + + diff --git a/deploy-kubernetes-helm/README.md b/deploy-kubernetes-helm/README.md new file mode 100644 index 0000000..e27580c --- /dev/null +++ b/deploy-kubernetes-helm/README.md @@ -0,0 +1,19 @@ + +# helm + +```bash +# k8s-master节点安装 +wget https://get.helm.sh/helm-v3.14.2-linux-amd64.tar.gz +tar -zxf helm-*-linux-amd64.tar.gz +cp linux-amd64/helm /usr/local/bin/ + +# 验证安装 +helm version +``` \ No newline at end of file diff --git a/deploy-kubernetes-helm/helm仓库.md b/deploy-kubernetes-helm/helm仓库.md new file mode 100644 index 0000000..b66252c --- /dev/null +++ b/deploy-kubernetes-helm/helm仓库.md @@ -0,0 +1,14 @@ +# 部署仓库 + +```bash +## 添加仓库 +helm repo add harbor https://helm.goharbor.io +helm repo add bitnami https/charts.bitnami.com/bitnami +helm repo add microsoft http://mirror.azure.cn/kubernetes/charts + +## 更新仓库 +helm repo update + +## 移除仓库 +helm repo remove xxxx +``` diff --git a/deploy-kubernetes-helm/常用命令.md b/deploy-kubernetes-helm/常用命令.md new file mode 100644 index 0000000..b819d4a --- /dev/null +++ b/deploy-kubernetes-helm/常用命令.md @@ -0,0 +1,26 @@ + 
+ +# 命令 + +## nfs 命令行 + +```bash +mkdir /data/nfs -p +helm install test-storageclass \ +Microsoft/nfs-client-provisioner \ +--set nfs.server=192.168.66.161 \ +--set nfs.path=/data/nfs +``` + +## 单机 minio + +```bash +helm install stable/minio +helm install minio --namespace minio \ +--set mode=distributed,replicas=3 stable/minio +``` diff --git a/deploy/jenkins-ci.md b/deploy/jenkins-ci.md new file mode 100644 index 0000000..759363e --- /dev/null +++ b/deploy/jenkins-ci.md @@ -0,0 +1,18 @@ +# Jenkins ci + +## 部署 + +- 基于镜像部署 +- 基于镜像+deployment yml + +## 图 + +![jenkins-demo](/docs/image/jenkins1.png) + +--- + +![jenkins-demo](/docs/image/jenkins2.png) + +--- + +![jenkins-demo](/docs/image/jenkins3.png) diff --git a/docker-compose-all-one/README.md b/docker-compose-all-one/README.md new file mode 100644 index 0000000..86ff480 --- /dev/null +++ b/docker-compose-all-one/README.md @@ -0,0 +1,61 @@ + + +# docker-compose-all-one + +## docker + +```bash +# 一键安装Docker +curl -sSL https://get.docker.com/ | sh +``` + + +## 二进制 docker-compose + +下载版本: https://github.com/docker/compose/releases + +```bash +wget https://github.com/docker/compose/releases/download/v2.14.0/docker-compose-`uname -s`-`uname -m` + +mv docker-compose* /usr/local/sbin/docker-compose +chmod +x /usr/local/sbin/docker-compose +docker-compose version # 查看docker-compose版本 +``` + +## 存储路径 + +```text +/data/docker +``` + +## 服务一键安装指令 +```bash +# 拉起所有基础服务 +docker-compose up -d +# 停止所有服务 +docker-compose down + +# 停指定服务 +docker-compose down kafka +# 安装指定服务 +docker-compose up -d mysql +``` + +## 服务清单 + +| 服务 | 连接地址 | 账号密码 | +| ------------ | ----------------------- | -------------------------------------------------------------------------------- | +|mysql | 端口:3306 | root/123456 | +|redis | 端口:6379 |123456 | +|kafka | 端口:9092 |- | +|kafka-manager | 端口:9000 |- | +|elasticsearch | 端口:9200 |- | +|elasticsearch-head | 管理后台: http://ip:9100 |- | +|nacos | 管理后台: http://ip:8848/nacos |-| +|grafana | 管理后台: http://ip:3000 
|admin/admin | + diff --git a/docker-compose-all-one/config/elasticsearch/elasticsearch.yml b/docker-compose-all-one/config/elasticsearch/elasticsearch.yml new file mode 100644 index 0000000..8ea46f2 --- /dev/null +++ b/docker-compose-all-one/config/elasticsearch/elasticsearch.yml @@ -0,0 +1,11 @@ +cluster.name: "es-1" +network.host: 0.0.0.0 +http.port: 9200 +http.cors.enabled: true +http.cors.allow-origin: "*" # 允许跨域访问 + + +# node.master: true +# node.data: true +# path.data: /usr/share/elasticsearch/data +# path.logs: /usr/share/elasticsearch/logs \ No newline at end of file diff --git a/docker-compose-all-one/config/history/old.yml b/docker-compose-all-one/config/history/old.yml new file mode 100644 index 0000000..58c41e3 --- /dev/null +++ b/docker-compose-all-one/config/history/old.yml @@ -0,0 +1,51 @@ + ########## rabbitmq ########## + rabbitmq: + image: rabbitmq:management + container_name: rabbitmq + hostname: rabbitmq + restart: unless-stopped + privileged: true + ports: + - 4369:4369 + - 5671:5671 + - 5672:5672 + - 15672:15672 + - 25672:25672 + environment: + RABBITMQ_DEFAULT_VHOST: / + RABBITMQ_DEFAULT_USER: admin + RABBITMQ_DEFAULT_PASS: admin + RABBITMQ_LOGS: /var/lib/rabbitmq/rabbitmq.log + RABBITMQ_SASL_LOGS: /var/lib/rabbitmq/rabbitmq-sasl.log + RABBITMQ_ERLANG_COOKIE: LZJADKXKLULIXFKAALGX + logging: + driver: "json-file" + options: + max-size: "50m" + max-file: "3" + volumes: + - /etc/localtime:/etc/localtime:ro + - /etc/hosts:/etc/hosts:ro + - /data/docker/rabbitmq:/var/lib/rabbitmq + network_mode: "host" + + + +########## kafka-manager ########## + kafka-manager: + image: sheepkiller/kafka-manager + container_name: kafka-manager + hostname: kafka-manager + restart: always + ports: + - 9000:9000 + network_mode: 'host' + environment: + ZK_HOSTS: localhost:2181 + APPLICATION_SECRET: letmein + KAFKA_MANAGER_AUTH_ENABLED: "true" + KAFKA_MANAGER_USERNAME: admin + KAFKA_MANAGER_PASSWORD: admin + depends_on: + - zookeeper + - kafka \ No newline at end of 
file diff --git a/docker-compose-all-one/config/mysql/my.cnf b/docker-compose-all-one/config/mysql/my.cnf new file mode 100644 index 0000000..fa1589c --- /dev/null +++ b/docker-compose-all-one/config/mysql/my.cnf @@ -0,0 +1,19 @@ +[mysqld] +lower_case_table_names=1 +#datadir=/var/lib/mysql/data +#basedir=/var/lib/mysql +socket=/var/lib/mysql/mysql.sock +user=mysql +default-storage-engine=INNODB +character-set-server=utf8mb4 +collation-server=utf8mb4_unicode_ci + +# 日志配置 +log_error = /var/lib/mysql/error.log +slow_query_log = 1 +slow_query_log_file = /var/lib/mysql/slow-query.log +# 优化参数 +innodb_buffer_pool_size = 2G # 内存缓冲池的大小建议为系统内存的 50-80% +innodb_log_buffer_size = 8M # 默认值8MB ,写入负载较大时可提高 +skip_name_resolve = 1 + diff --git a/docker-compose-all-one/config/mysql57/my.cnf b/docker-compose-all-one/config/mysql57/my.cnf new file mode 100644 index 0000000..5287d7a --- /dev/null +++ b/docker-compose-all-one/config/mysql57/my.cnf @@ -0,0 +1,19 @@ +[mysqld] +lower_case_table_names=1 +#datadir=/var/lib/mysql/data +#basedir=/var/lib/mysql +socket=/var/lib/mysql/mysql.sock +user=mysql +default-storage-engine=INNODB +character-set-server=utf8 +sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION +symbolic-links=0 +[mysqld_safe] +log-error=/var/log/mysqld.log +pid-file=/var/run/mysqld/mysqld.pid +[client] +default-character-set=utf8 +port=3306 +socket=/var/lib/mysql/mysql.sock +[mysql] +default-character-set=utf8 \ No newline at end of file diff --git a/docker-compose-all-one/config/redis/redis.conf b/docker-compose-all-one/config/redis/redis.conf new file mode 100644 index 0000000..972fb93 --- /dev/null +++ b/docker-compose-all-one/config/redis/redis.conf @@ -0,0 +1,64 @@ +port 6379 # 监听端口 +bind 0.0.0.0 # 监听地址 +protected-mode no # Protected-mode 禁止公网访问redis cache +daemonize yes # 是否以守护进程方式启动 +timeout 300 # 客户端连接的超时时间,单位为秒,超时后会关闭连接,0永不超时 +tcp-keepalive 60 # 服务端主动向空闲的客户端发起ack请求,以判断连接是否有效 +databases 20 # 
设置数据库数量,默认数据库为0 +maxclients 2000 # 设置最大连接数,0为不限制 +slowlog-log-slower-than 10000 +slowlog-max-len 128 +tcp-backlog 511 +supervised no +pidfile /var/run/redis_6379.pid +loglevel notice #日志级别,分别有: +# debug :适用于开发和测试 +# verbose :更详细信息 +# notice :适用于生产环境 +# warning :只记录警告或错误信息 +logfile "/var/redis/log/6379.log" +save 900 1 +save 300 10 +save 60 10000 +stop-writes-on-bgsave-error yes +rdbcompression yes +rdbchecksum yes +dbfilename dump.rdb +dir ./ +slave-serve-stale-data yes +slave-read-only yes +repl-diskless-sync no +repl-diskless-sync-delay 5 +repl-disable-tcp-nodelay no +slave-priority 100 +maxmemory-policy allkeys-lru +appendonly yes # AOF持久化,是否记录更新操作日志,默认redis是异步(快照)把数据写入本地磁盘 +appendfilename "6379.aof" # 指定更新日志文件名 +appendfsync everysec +# everysec 默认方式,每秒同步一次到appendonly.aof +# always 每次有数据发生变化时都会写入 +# no 不同步,数据不会持久化 +no-appendfsync-on-rewrite no # 当AOF日志文件即将增长到指定百分比时,redis通过调用BGREWRITEAOF是否自动重写AOF日志文件 +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb +aof-load-truncated yes +lua-time-limit 5000 +cluster-enabled yes +cluster-config-file nodes-6379.conf +latency-monitor-threshold 0 +notify-keyspace-events "" +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 +list-max-ziplist-size -2 +list-compress-depth 0 +set-max-intset-entries 512 +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 +hll-sparse-max-bytes 3000 +activerehashing yes +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 +hz 10 +aof-rewrite-incremental-fsync yes +requirepass 123456 diff --git a/docker-compose-all-one/config/rocketmq/broker.conf b/docker-compose-all-one/config/rocketmq/broker.conf new file mode 100644 index 0000000..55b803f --- /dev/null +++ b/docker-compose-all-one/config/rocketmq/broker.conf @@ -0,0 +1,17 @@ +brokerClusterName=DefaultCluster +brokerName=broker-a +brokerId=0 +# 修改为你宿主机的 IP +brokerIP1=0.0.0.0 +defaultTopicQueueNums=4 +autoCreateTopicEnable=true 
+autoCreateSubscriptionGroup=true +listenPort=10911 +deleteWhen=04 +fileReservedTime=120 +mapedFileSizeCommitLog=1073741824 +mapedFileSizeConsumeQueue=300000 +diskMaxUsedSpaceRatio=88 +maxMessageSize=65536 +brokerRole=ASYNC_MASTER +flushDiskType=ASYNC_FLUSH \ No newline at end of file diff --git a/docker-compose-all-one/docker-compose.yml b/docker-compose-all-one/docker-compose.yml new file mode 100644 index 0000000..2601250 --- /dev/null +++ b/docker-compose-all-one/docker-compose.yml @@ -0,0 +1,44 @@ +version: '3' +services: + nacos: + extends: + file: services/nacos.yml + service: nacos + redis: + extends: + file: services/redis.yml + service: redis + mysql: + extends: + file: services/mysql.yml + service: mysql + kafka: + extends: + file: services/kafka.yml + service: kafka + kafka-manager: + extends: + file: services/kafka-manager.yml + service: kafka-manager + zookeeper: + extends: + file: services/zookeeper.yml + service: zookeeper + elasticsearch: + extends: + file: services/elasticsearch.yml + service: elasticsearch + elasticsearch-head: + extends: + file: services/elasticsearch-head.yml + service: elasticsearch-head + grafana: + extends: + file: services/grafana.yml + service: grafana + openldap: + extends: + file: services/openldap.yml + service: openldap + + \ No newline at end of file diff --git a/docker-compose-all-one/docs/es.md b/docker-compose-all-one/docs/es.md new file mode 100644 index 0000000..c34b206 --- /dev/null +++ b/docker-compose-all-one/docs/es.md @@ -0,0 +1,7 @@ +# es + +```bash +# 查看es节点状态 +curl -XGET 'http://127.0.0.1:9200/_cat/nodes?v' +``` + diff --git a/docker-compose-all-one/docs/grafana.md b/docker-compose-all-one/docs/grafana.md new file mode 100644 index 0000000..5c3c7c7 --- /dev/null +++ b/docker-compose-all-one/docs/grafana.md @@ -0,0 +1,5 @@ +# Grafana + +```bash +docker exec -it grafana grafana-cli plugins install alexanderzobnin-zabbix-app +``` \ No newline at end of file diff --git a/docker-compose-all-one/docs/kafka.md 
b/docker-compose-all-one/docs/kafka.md new file mode 100644 index 0000000..63ebea4 --- /dev/null +++ b/docker-compose-all-one/docs/kafka.md @@ -0,0 +1,12 @@ +# 命令 + +```bash +# 查看topics列表 +docker exec -it kafka kafka-topics.sh --list --bootstrap-server localhost:9092 +# 创建topics +docker exec -it kafka kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 3 --topic test-data1 + +# 旧版本 +kafka-topics.sh --list --zookeeper zk-1:2181 +kafka-topics.sh --zookeeper zk-1:2181 --create --replication-factor 1 --partitions 3 --topic pro-dmin +``` \ No newline at end of file diff --git a/docker-compose-all-one/services/docker-compose-bak.yml b/docker-compose-all-one/services/docker-compose-bak.yml new file mode 100644 index 0000000..df1d2b3 --- /dev/null +++ b/docker-compose-all-one/services/docker-compose-bak.yml @@ -0,0 +1,69 @@ +# |rabbitmq | 端口:5672 |- | +# |rabbitmq-management | 管理后台: http://ip:15672 |guest/guest | +# |rocketmq | 端口: 9876 | - | +# |rocketmq-console | 管理后台: http://ip:28080 |- | + +version: '3' +services: + ########## rabbitmq ########## + rabbitmq: + image: rabbitmq:management + container_name: rabbitmq + hostname: rabbitmq + restart: unless-stopped + privileged: true + ports: + - 4369:4369 + - 5671:5671 + - 5672:5672 + - 15672:15672 + - 25672:25672 + environment: + RABBITMQ_DEFAULT_VHOST: / + RABBITMQ_DEFAULT_USER: guest + RABBITMQ_DEFAULT_PASS: guest + RABBITMQ_LOGS: /var/lib/rabbitmq/rabbitmq.log + RABBITMQ_SASL_LOGS: /var/lib/rabbitmq/rabbitmq-sasl.log + RABBITMQ_ERLANG_COOKIE: LZJADKXKLULIXFKAALGX + logging: + driver: "json-file" + options: + max-size: "50m" + max-file: "3" + volumes: + - /etc/localtime:/etc/localtime:ro + - /etc/hosts:/etc/hosts:ro + - /data/docker/rabbitmq:/var/lib/rabbitmq + network_mode: "host" + ########## rocketmq ########## + rocketmq: + image: foxiswho/rocketmq:4.7.0 + container_name: rocketmq + ports: + - 9876:9876 + environment: + JAVA_OPT: -server -Xms256m -Xmx256m + command: sh 
mqnamesrv + rocketmq-broker: + image: foxiswho/rocketmq:4.7.0 + container_name: rocketmq-broker + ports: + - 10911:10911 + - 10909:10909 + volumes: + - ./config/rocketmq/broker.conf:/usr/local/dockerCompose/rocketmq/conf/broker.conf + environment: + JAVA_OPT_EXT: -server -Xms256m -Xmx256m -Xmn128m + NAMESRV_ADDR: 127.0.0.1:9876 + command: sh mqbroker -n 127.0.0.1:9876 -c /usr/local/dockerCompose/rocketmq/conf/broker.conf + rocketmq-console: + image: styletang/rocketmq-console-ng + container_name: rocketmq-console + ports: + - 28080:8080 + environment: + JAVA_OPTS: -Drocketmq.namesrv.addr=127.0.0.1:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=falses + depends_on: # 依赖情况,需要先确保依赖服务 + - rocketmq + - rocketmq-broker + diff --git a/docker-compose-all-one/services/elasticsearch-head.yml b/docker-compose-all-one/services/elasticsearch-head.yml new file mode 100644 index 0000000..ce977e2 --- /dev/null +++ b/docker-compose-all-one/services/elasticsearch-head.yml @@ -0,0 +1,11 @@ +version: '3' +services: + elasticsearch-head: # web-ui + image: mobz/elasticsearch-head:5-alpine + container_name: elasticsearch-head + hostname: elasticsearch-head + restart: always + ports: + - "9100:9100" + depends_on: # 依赖情况,需要先确保依赖服务 + - elasticsearch \ No newline at end of file diff --git a/docker-compose-all-one/services/elasticsearch.yml b/docker-compose-all-one/services/elasticsearch.yml new file mode 100644 index 0000000..d667813 --- /dev/null +++ b/docker-compose-all-one/services/elasticsearch.yml @@ -0,0 +1,16 @@ +version: '3' +services: +########## elasticsearc ########## + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0 + container_name: elasticsearch # 容器名 + hostname: elasticsearch # 主机名 + restart: unless-stopped # 容器停止时重新启动 + environment: + - discovery.type=single-node + - ES_JAVA_OPTS=-Xms512m -Xmx512m + ports: + - 9200:9200 + volumes: + - ../config/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml + network_mode: "host" 
diff --git a/docker-compose-all-one/services/grafana.yml b/docker-compose-all-one/services/grafana.yml new file mode 100644 index 0000000..929d9ca --- /dev/null +++ b/docker-compose-all-one/services/grafana.yml @@ -0,0 +1,13 @@ +version: '3' +services: + ########## grafana ########## + grafana: + image: grafana/grafana:9.4.0 + container_name: grafana # 容器名 + hostname: grafana # 主机名 + restart: unless-stopped # 容器停止时重新启动 + ports: + - 3000:3000 + volumes: + - /data/docker/grafana-data:/var/lib/grafan + network_mode: 'host' \ No newline at end of file diff --git a/docker-compose-all-one/services/kafka-manager.yml b/docker-compose-all-one/services/kafka-manager.yml new file mode 100644 index 0000000..afb863b --- /dev/null +++ b/docker-compose-all-one/services/kafka-manager.yml @@ -0,0 +1,14 @@ +version: '3' +services: + ########## kafka ########## + kafka-manager: + image: sheepkiller/kafka-manager:latest + container_name: kafka-manager + restart: unless-stopped + ports: + - "9000:9000" + environment: + ZK_HOSTS: "127.0.0.1:2181" + network_mode: 'host' + depends_on: + - zookeeper \ No newline at end of file diff --git a/docker-compose-all-one/services/kafka.yml b/docker-compose-all-one/services/kafka.yml new file mode 100644 index 0000000..661a708 --- /dev/null +++ b/docker-compose-all-one/services/kafka.yml @@ -0,0 +1,25 @@ +version: '3' +services: +########## Kafka ########## + kafka: + image: 'bitnami/kafka:latest' + container_name: kafka # 容器名 + hostname: kafka # 主机名 + restart: unless-stopped # 容器停止时重新启动 + ports: + - '9092:9092' # 将 Kafka 的 9092 端口映射到主机的 9092 端口 + environment: + KAFKA_BROKER_ID: 1 # 设置 Kafka Broker ID 为 1 + KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 # Kafka 监听器配置 + KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://127.0.0.1:9092 # Kafka 广播监听器配置 + KAFKA_CFG_ZOOKEEPER_CONNECT: 127.0.0.1:2181 # Kafka 连接 Zookeeper 的配置 + ALLOW_PLAINTEXT_LISTENER: 'yes' # 允许使用明文监听器 + KAFKA_LOG_RETENTION_HOURS: 120 # 日志保留时间设置为 120 小时 + KAFKA_MESSAGE_MAX_BYTES: 10000000 # 消息最大字节数设置为 
10000000 + KAFKA_REPLICA_FETCH_MAX_BYTES: 10000000 # 复制获取最大字节数设置为 10000000 + KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS: 60000 # 消费者组最大会话超时时间设置为 60000 毫秒 + KAFKA_NUM_PARTITIONS: 1 # 分区数量设置为 1 + KAFKA_DELETE_RETENTION_MS: 1000 # 删除保留时间设置为 1000 毫秒 + network_mode: 'host' # 使用主机网络模式 + depends_on: + - zookeeper # 依赖于 zookeeper 服务 \ No newline at end of file diff --git a/docker-compose-all-one/services/mysql.yml b/docker-compose-all-one/services/mysql.yml new file mode 100644 index 0000000..0e467b5 --- /dev/null +++ b/docker-compose-all-one/services/mysql.yml @@ -0,0 +1,19 @@ +version: '3' +services: + ########## mysql ########## + mysql: + image: mysql:latest + container_name: mysql + hostname: mysql + restart: unless-stopped + privileged: true + command: --default-authentication-plugin=mysql_native_password + ports: + - 3306:3306 + environment: + MYSQL_ROOT_PASSWORD: 123456 + volumes: + - /data/docker/mysql/data:/var/lib/mysql + - /data/docker/mysql/mysql-files:/var/lib/mysql-files + - ../config/mysql/my.cnf:/etc/mysql/my.cnf + network_mode: "host" # 使用主机网络模式 \ No newline at end of file diff --git a/docker-compose-all-one/services/nacos.yml b/docker-compose-all-one/services/nacos.yml new file mode 100644 index 0000000..cfddfdc --- /dev/null +++ b/docker-compose-all-one/services/nacos.yml @@ -0,0 +1,18 @@ +version: '3' +services: + ########## nacos ########## + nacos: + image: nacos/nacos-server:latest + container_name: nacos + hostname: nacos + # restart: unless-stopped + ports: + - 8848:8848 + environment: + MODE: standalone + JVM_XMS: 256m + JVM_XMX: 512m + volumes: + - /data/docker/nacos/data:/home/nacos/data + - /data/docker/nacos/logs:/home/nacos/logs + network_mode: 'host' # 网络模式 \ No newline at end of file diff --git a/docker-compose-all-one/services/openldap.yml b/docker-compose-all-one/services/openldap.yml new file mode 100644 index 0000000..df95e52 --- /dev/null +++ b/docker-compose-all-one/services/openldap.yml @@ -0,0 +1,19 @@ +version: '3' +services: +########## 
ldap server ########## + openldap: + image: osixia/openldap:latest + container_name: ldap-service + hostname: ldap-service + restart: unless-stopped + ports: + - 389:389 + - 636:636 + environment: + LDAP_ORGANISATION: opsx + LDAP_DOMAIN: "opsx.vip" + LDAP_BASE_DN: "dc=opsx,dc=vip" + LDAP_ADMIN_PASSWORD: "opsx.vip" + volumes: + - /data/docker/slapd/database:/var/lib/ldap + - /data/docker/slapd/config:/etc/ldap/slapd.d diff --git a/docker-compose-all-one/services/redis.yml b/docker-compose-all-one/services/redis.yml new file mode 100644 index 0000000..fb90af7 --- /dev/null +++ b/docker-compose-all-one/services/redis.yml @@ -0,0 +1,18 @@ +version: '3' +services: + + ########## redis ########## + redis: + image: redis + container_name: redis + hostname: redis + restart: unless-stopped + privileged: true + ports: + - 6379:6379 + command: redis-server --requirepass 123456 --dir /data + volumes: + - /etc/localtime:/etc/localtime:ro + - /data/docker/redis/data:/data + - ../config/redis/redis.conf:/etc/redis/redis.conf + network_mode: "host" # 方便容器与容器访问时直接使用宿主机的网络 \ No newline at end of file diff --git a/docker-compose-all-one/services/zookeeper.yml b/docker-compose-all-one/services/zookeeper.yml new file mode 100644 index 0000000..63c33f1 --- /dev/null +++ b/docker-compose-all-one/services/zookeeper.yml @@ -0,0 +1,17 @@ +version: '3' +services: + ########## zk ########## + zookeeper: + image: bitnami/zookeeper:latest + container_name: zookeeper + hostname: zookeeper + restart: unless-stopped + ports: + - "2181:2181" + environment: + - ALLOW_ANONYMOUS_LOGIN=yes # 匿名登录--必须开启 + network_mode: 'host' + # volumes: # 需要权限 chmod 777 + # - /data/docker/zookeeper/data:/bitnami/zookeeper/data + # - /data/docker/zookeeper/logs:/bitnami/zookeeper/logs + \ No newline at end of file diff --git a/docker-compose-yaml/base.sh b/docker-compose-yaml/base.sh new file mode 100644 index 0000000..e93431a --- /dev/null +++ b/docker-compose-yaml/base.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash +### +# 
@Author: Logan.Li +# @Gitee: https://gitee.com/attacker +# @email: admin@attacker.club +# @Date: 2023-10-28 00:01:57 +# @LastEditTime: 2023-10-28 01:30:56 +# @Description: +### + +CURRENT_DIR=$( + cd "$(dirname "$0")" + pwd +) + +function logger() { + case "$1" in + "error" | "red") + echo -e "\e[1;31m$2\e[0m" + echo "$2" >>${CURRENT_DIR}/install.log + exit 1 + ;; + "warning" | "yellow") + echo -e "\e[1;33m$2\e[0m" + echo "$2" >>${CURRENT_DIR}/install.log + ;; + "success" | "green") + echo -e "\e[1;32m$2\e[0m" + echo "$2" >>${CURRENT_DIR}/install.log + ;; + "info" | "blue") + echo -e "\e[1;34m$2\e[0m" + echo "$2" >>${CURRENT_DIR}/install.log + ;; + esac +} + +# 判断是否root用户 +if [ $(id -u) -ne 0 ]; then + logger error "########## This script must be run as root !!! ##########" +fi + +# 环境检查 +echo "time: $(date)" + +if which getenforce && [ $(getenforce) == "Enforcing" ]; then + logger info "... 关闭 SELINUX" + setenforce 0 + sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config +fi + +>${CURRENT_DIR}/install.log +logger info "======================= 开始安装 =======================" 2>&1 | tee -a ${CURRENT_DIR}/install.log + +logger info "检查 是否存在离线包 [offline.tar.gz]" +if [ -f ${CURRENT_DIR}/offline.tar.gz ]; then + tar zxf offline.tar.gz + chmod +x docker-install.sh && ./docker-install.sh + logger success "离线docker 安装成功" + \cp docker-compose /usr/local/sbin/docker-compose + chmod +x /usr/local/sbin/docker-compose + logger success "离线ocker-compose 安装成功" +fi + +# 检查是否已经安装 Docker +if [ -x "$(command -v docker)" ]; then + logger info "Docker 已经安装" +else + # 下载 Docker 二进制安装包 + curl -fsSL https://get.docker.com | sh + logger success "安装 Docker" +fi + +# 检查是否已经安装 Docker Compose +if [ -x "$(command -v docker-compose)" ]; then + logger info "Docker Compose 已经安装" +else + # 下载 Docker Compose 二进制安装包 + curl -L https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/sbin/docker-compose + # 设置可执行权限 + chmod +x 
/usr/local/sbin/docker-compose + docker-compose version # 查看docker-compose版本 + logger success "安装 Docker Compose" +fi + +# 检查是否有离线镜像 +if [ -f ${CURRENT_DIR}/image.tar.gz ]; then + logger info "检查到离线镜像 [image.tar.gz]" + cat image.tar.gz | gzip -d | docker load + logger success "完成镜像恢复" +fi + +logger info "开始服务部署 ... [xx.yml]" diff --git a/docker-compose-yaml/readme.md b/docker-compose-yaml/readme.md new file mode 100644 index 0000000..313c7d5 --- /dev/null +++ b/docker-compose-yaml/readme.md @@ -0,0 +1,22 @@ + +# 命令 + + + + +## 离线镜像使用 +```bash +# 备份镜像 +docker save -o images.tar.gz image1 image2 image3 + +# 恢复镜像 +cat image.tar.gz | gzip -d | docker load +``` + diff --git a/docker-compose-yaml/zabbix-docker.sh b/docker-compose-yaml/zabbix-docker.sh new file mode 100644 index 0000000..5d5df36 --- /dev/null +++ b/docker-compose-yaml/zabbix-docker.sh @@ -0,0 +1,81 @@ +#!/bin/bash +### +# @Author: admin@attacker.club +# @Date: 2022-09-29 14:35:55 +# @LastEditTime: 2023-10-28 00:59:41 +# @Description: +### + +docker stop zabbix-mysql +docker stop zabbix-web +docker stop zabbix-server +docker rm zabbix-mysql +docker rm zabbix-web +docker rm zabbix-server + +## DB服务 +dataDir="/opt/docker-data/mysql" +yum remove mariadb* -y # 卸载默认mariadb +rpm -ivh https://repo.mysql.com/mysql80-community-release-el7.rpm +yum install mysql-community-client -y # 安装mysql client + +if [ ! 
"$rootPassword" ]; then
  # First run: generate random 12-char alphanumeric credentials and persist
  # them in ~/.bashrc so that re-running the script reuses the same passwords.
  # NOTE(review): this stores plaintext secrets in ~/.bashrc — acceptable for
  # a throwaway lab host, not for production.
  rootPassword=$(tr -dc 'A-Za-z0-9' </dev/urandom | head -c 12)
  zbxPassword=$(tr -dc 'A-Za-z0-9' </dev/urandom | head -c 12)
  echo "rootPassword=$rootPassword" >>~/.bashrc
  echo "zbxPassword=$zbxPassword" >>~/.bashrc
fi

echo "> 启动mysql"
docker run \
  --restart always \
  -d -p 3306:3306 \
  --name zabbix-mysql \
  --hostname zabbix-mysql \
  -e MYSQL_ROOT_PASSWORD="${rootPassword}" \
  -e MYSQL_USER="zabbix" \
  -e MYSQL_PASSWORD="${zbxPassword}" \
  -e MYSQL_DATABASE="zabbix" \
  -v "${dataDir}:/var/lib/mysql" \
  mysql:8 --character-set-server=utf8 --collation-server=utf8_bin

# Give MySQL time to initialise before zabbix-server tries to connect.
sleep 10

## 启动zabbix server
echo "> 启动zabbix server"
docker run -d -p 10051:10051 \
  --restart always \
  --name zabbix-server \
  --hostname zabbix-server \
  --link zabbix-mysql:mysql \
  -e DB_SERVER_HOST="mysql" \
  -e MYSQL_USER="zabbix" \
  -e MYSQL_PASSWORD="${zbxPassword}" \
  -v /etc/localtime:/etc/localtime:ro \
  -v /data/docker/zabbix/alertscripts:/usr/lib/zabbix/alertscripts \
  -v /data/docker/zabbix/externalscripts:/usr/lib/zabbix/externalscripts \
  zabbix/zabbix-server-mysql:ubuntu-6.0-latest
# zabbix/zabbix-server-mysql:latest

## 启动zabbix web
echo "> 启动zabbix web"
docker run -d -p 81:8080 \
  --restart always \
  --name zabbix-web \
  --hostname zabbix-web \
  --link zabbix-mysql:mysql \
  --link zabbix-server:zabbix-server \
  -e DB_SERVER_HOST="mysql" \
  -e MYSQL_USER="zabbix" \
  -e MYSQL_PASSWORD="${zbxPassword}" \
  -e ZBX_SERVER_HOST="zabbix-server" \
  -e PHP_TZ="Asia/Shanghai" \
  zabbix/zabbix-web-nginx-mysql:6.0-alpine-latest
# zabbix/zabbix-web-nginx-mysql:latest

sleep 3
# Write ready-to-use client commands (with the generated passwords) to mysql.txt.
echo "mysql -h127.0.0.1 -uroot -p$rootPassword" >mysql.txt
echo "mysql -h127.0.0.1 -uzabbix -p$zbxPassword" >>mysql.txt
echo "http://zabbix 账号: Admin / zabbix"
## sql添加远程账号
# CREATE USER 'admin'@'%' ;
# GRANT ALL ON *.* TO 'admin'@'%' IDENTIFIED WITH mysql_native_password BY 'adminPwd123';
diff --git a/docker-compose-yaml/zabbix.sh 
b/docker-compose-yaml/zabbix.sh new file mode 100644 index 0000000..e69de29 diff --git a/docker-compose-yaml/zabbix.yml b/docker-compose-yaml/zabbix.yml new file mode 100644 index 0000000..e69de29 diff --git a/docker-compose-yaml/zabbix/mysql.zabbix.yml b/docker-compose-yaml/zabbix/mysql.zabbix.yml new file mode 100644 index 0000000..7f33c86 --- /dev/null +++ b/docker-compose-yaml/zabbix/mysql.zabbix.yml @@ -0,0 +1,112 @@ +version: "3" +services: + + mysql-server: + image: mysql:8.0 + hostname: mysql-server + container_name: mysql-server + restart: always + command: + - mysqld + - --character-set-server=utf8 + - --collation-server=utf8_bin + environment: + - MYSQL_DATABASE=zabbix + - MYSQL_USER=zabbix + - MYSQL_PASSWORD=zabbix@l1ve + - MYSQL_ROOT_PASSWORD=r00t@l1ve + - TZ=Asia/Shanghai + - LANG=en_US.UTF-8 + ports: + - 3306:3306 + volumes: + - /etc/localtime:/etc/localtime + - /data/docker/zabbix/mysql-data/mysql:/var/lib/mysql + expose: + - 3306 + + + zabbix-java-gateway: + image: zabbix/zabbix-java-gateway:latest + container_name: zabbix-java-gateway + restart: always + volumes: + - /etc/localtime:/etc/localtime + ports: + - "10052:10052" + + + zabbix-server: + image: zabbix/zabbix-server-mysql:latest + container_name: zabbix-server + restart: always + ports: + - 10051:10051 + volumes: + - /etc/localtime:/etc/localtime + - /data/docker/zabbix/mysql-data/zabbix:/usr/lib/zabbix + - /data/docker/zabbix/alertscripts:/usr/lib/zabbix/alertscripts + - /data/docker/zabbix/externalscripts:/usr/lib/zabbix/externalscripts + environment: + - DB_SERVER_HOST=mysql-server + - MYSQL_DATABASE=zabbix + - MYSQL_USER=zabbix + - MYSQL_PASSWORD=zabbix@l1ve + - MYSQL_ROOT_PASSWORD=r00t@l1ve + - ZBX_JAVAGATEWAY=zabbix-java-gateway + depends_on: + - mysql-server + - zabbix-java-gateway + links: + - zabbix-java-gateway + - mysql-server + + + zabbix-web: + image: zabbix/zabbix-web-nginx-mysql:latest + container_name: zabbix-web + restart: always + ports: + - 80:8080 + - 443:443 + volumes: 
+ - /etc/localtime:/etc/localtime + # - /data/docker/zabbix/fonts/simkai.ttf:/usr/share/zabbix/assets/fonts/DejaVuSans.ttf + environment: + - PHP_TZ=Asia/Shanghai + # - DB_SERVER_HOST=172.17.0.1 + - DB_SERVER_HOST=zabbix-server + - MYSQL_DATABASE=zabbix + - MYSQL_USER=zabbix + - MYSQL_PASSWORD=zabbix@l1ve + - MYSQL_ROOT_PASSWORD=r00t@l1ve + expose: + - 80 + - 443 + depends_on: + - mysql-server + - zabbix-java-gateway + links: + - zabbix-server + + + zabbix-agent: + image: zabbix/zabbix-agent:latest + container_name: zabbix-agent + restart: always + ports: + - 10050:10050 + environment: + - ZBX_HOSTNAME=zabbix-agent + - ZBX_SERVER_HOST=zabbix-server + - ZBX_SERVER_PORT=10051 + volumes: + - /data/docker/zabbix/zabbix_agentd.d/:/etc/zabbix/zabbix_agentd.d/ + - /data/docker/zabbix/scripts/:/etc/zabbix/scripts/ + expose: + - 10050 + links: + - zabbix-server + + + diff --git a/docker-compose-yaml/zabbix/pg.zabbix.yml b/docker-compose-yaml/zabbix/pg.zabbix.yml new file mode 100644 index 0000000..d2d8acb --- /dev/null +++ b/docker-compose-yaml/zabbix/pg.zabbix.yml @@ -0,0 +1,71 @@ +version: '3' + +services: + zabbix-server: + image: zabbix/zabbix-server-pgsql:latest + container_name: zabbix-server + restart: always + ports: + - "10051:10051" + environment: + - DB_SERVER_HOST=db + - POSTGRES_USER=zabbix + - POSTGRES_PASSWORD=zabbix@l1ve + - POSTGRES_DB=zabbix + - ZBX_JAVAGATEWAY=zabbix-java-gateway + depends_on: + - db + - zabbix-java-gateway + networks: + - zabbix-net + volumes: + - /data/docker/zabbix/alertscripts:/usr/lib/zabbix/alertscripts + - /data/docker/zabbix/externalscripts:/usr/lib/zabbix/externalscripts + + zabbix-web: + image: zabbix/zabbix-web-nginx-pgsql:latest + container_name: zabbix-web + restart: always + ports: + - "80:8080" + environment: + - PHP_TZ=Asia/Shanghai + - DB_SERVER_HOST=db + - POSTGRES_USER=zabbix + - POSTGRES_PASSWORD=zabbix@l1ve + - POSTGRES_DB=zabbix + - ZBX_SERVER_HOST=zabbix-server + depends_on: + - zabbix-server + - zabbix-java-gateway 
+ networks: + - zabbix-net + + db: + image: postgres:alpine + container_name: postgres-db + restart: always + ports: + - "5432:5432" + environment: + - POSTGRES_USER=zabbix + - POSTGRES_PASSWORD=zabbix@l1ve + - POSTGRES_DB=zabbix + - TZ=Asia/Shanghai + volumes: + - /data/docker/zabbix/data:/var/lib/postgresql/data + networks: + - zabbix-net + + + zabbix-java-gateway: + image: zabbix/zabbix-java-gateway:latest + container_name: zabbix-java-gateway + restart: always + ports: + - "10052:10052" + networks: + - zabbix-net + +networks: + zabbix-net: {} \ No newline at end of file diff --git a/docker-compose-yaml/zabbix/readme.md b/docker-compose-yaml/zabbix/readme.md new file mode 100644 index 0000000..8d34e20 --- /dev/null +++ b/docker-compose-yaml/zabbix/readme.md @@ -0,0 +1,62 @@ +# 启动 +docker-compose -f zabbix.yml up -d +# 查看进程 +docker-compose ps +# 停止 +docker-compose -f zabbix.yml down + +# http://192.168.8.110:8088/index.php +Admin/zabbix + +# 如果发现zabbix_server起不来,状态是Exit,查看日志 +docker logs -f mysql-server +docker logs -f zabbix-java-gateway +docker logs -f zabbix-server +docker logs -f zabbix-web-nginx-mysql +docker logs -f zabbix-agent + +# 测试端口 +docker exec -uroot -it zabbix-server bash +nc -vz 172.17.0.1 10050 -w 2 +nc -vz 172.17.0.1 10051 -w 2 + + +# 安装Python3 +```python +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +import json +import sys +import os +import datetime +import http.client + +# 你复制的webhook地址粘贴进url内 +def send_message(key, message): + url = f"https://open.larksuite.com/open-apis/bot/v2/hook/{key}" + payload_message = { + "msg_type": "text", + "content": { + "text": message + } + } + headers = { + 'Content-Type': 'application/json' + } + + conn = http.client.HTTPSConnection("open.larksuite.com") + conn.request("POST", f"/open-apis/bot/v2/hook/{key}", body=json.dumps(payload_message), headers=headers) + response = conn.getresponse() + data = response.read() + conn.close() + return data + +if __name__ == '__main__': + key = sys.argv[1] + text = 
sys.argv[2] + send_message(key, text) +``` + +# 脚本参数 +{ALERT.SENDTO} {ALERT.MESSAGE} \ No newline at end of file diff --git a/kubernets_api/consul-node.py b/kubernets_api/consul-node.py new file mode 100644 index 0000000..24482d9 --- /dev/null +++ b/kubernets_api/consul-node.py @@ -0,0 +1,105 @@ +import os +import sys +import json +import time +import re +from kubernetes import client, config, stream +import urllib3 +import hashlib +import yaml +import requests + + +# 禁用 InsecureRequestWarning +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +class KubernetsAPI: + def __init__(self, kubeconfig=None,token=None,apiServer=None): + if os.path.isfile(os.path.expanduser("~/.kube/config")): # 如果存在默认的 kubeconfig 文件,加载本地配置 + print("本地调用") + config.load_kube_config() + elif kubeconfig: + kubeconfig_dict = self.parse_kubeconfig(kubeconfig) # 解析 kubeconfig 内容并创建配置对象 + config.load_kube_config_from_dict(kubeconfig_dict) # 使用 config.load_kube_config_from_dict 创建 kubeconfig 配置对象 + elif token: + kubeconfig = client.Configuration() + kubeconfig.host = apiServer # APISERVER 地址 + kubeconfig.verify_ssl = False + kubeconfig.api_key = {"authorization": f"Bearer {token}"} + client.Configuration.set_default(kubeconfig) + else : + pass + + try: + self.core_api = client.CoreV1Api() + self.apps_api = client.AppsV1Api() + print("api接口调用验证成功.") + except Exception as e: + print(f"api接口调用验证失败.: {str(e)}") + sys.exit("API接口调用验证失败.程序退出.") + + + def parse_kubeconfig(self,kubeconfig_content): + try: + kubeconfig_dict = yaml.safe_load(kubeconfig_content) + return kubeconfig_dict + except yaml.YAMLError as e: + raise Exception(f"Error parsing kubeconfig content: {str(e)}") + + + def update_node_exporter_pods(self): + # 定义要获取的命名空间 + namespace = "monitor" + # 定义 label selector + label_selector = "app=node-exporter" + # 定义 Consul 地址 + consul_url = "http://consul.opsx.top" + + try: + # 调用 Kubernetes API 获取 Pod 列表 + api_response = self.core_api.list_namespaced_pod(namespace, 
label_selector=label_selector) + for pod in api_response.items: + print("Pod 名称: %s \t IP: %s \t 节点: %s" % (pod.metadata.name, pod.status.pod_ip, pod.spec.node_name)) + # 定义服务注册的数据 + data = { + "id": f"{pod.spec.node_name}-{pod.metadata.name}", + "name": "node-exporter", + "address": f"{pod.status.pod_ip}", + "port": 9100, + "checks": [{ + "http": f"http://{pod.status.pod_ip}:9100/metrics", + "interval": "5s" + }] + } + # 发送 PUT 请求以注册服务 + response = requests.put(f"{consul_url}/v1/agent/service/register", json=data) + + # 检查响应状态 + if response.status_code == 200: + print(f"服务 {pod.spec.node_name} 注册成功.") + else: + print(f"无法注册服务 {pod.spec.node_name}. 状态码: {response.status_code}") + print(response.text) + except Exception as e: + print("获取 Pod 列表时出错: %s" % e) + + def clean_failed_instances(): + time.sleep(3) + response = requests.get(f"{self.consul_url}/v1/health/state/critical") + if response.status_code == 200: + instances = response.json() + for instance in instances: + if instance['Status'] == 'critical': # 如果实例状态为严重 + service_id = instance['ServiceID'] + requests.put(f"{self.consul_url}/v1/agent/service/deregister/{service_id}") + print(f"失效实例ID: {service_id}") + else: + print(f"无法从 Consul API 获取数据。状态码:{response.status_code}") + + + +if __name__ == "__main__": + K8s = KubernetsAPI() + K8s.update_node_exporter_pods() + + diff --git a/kubernets_api/consul-up.py b/kubernets_api/consul-up.py new file mode 100644 index 0000000..685e382 --- /dev/null +++ b/kubernets_api/consul-up.py @@ -0,0 +1,157 @@ + +import os +import sys +import json +import time +import urllib3 +import requests +from kubernetes import client, config + +# 禁用 InsecureRequestWarning +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +class KubernetesAPI: + def __init__(self, kubeconfig=None,token=None,apiServer=None,consul=None): + self.consul_url = consul + if os.path.isfile(os.path.expanduser("~/.kube/config")): # 如果存在默认的 kubeconfig 文件,加载本地配置 + print("本地调用") + 
config.load_kube_config() + elif kubeconfig: + kubeconfig_dict = self.parse_kubeconfig(kubeconfig) # 解析 kubeconfig 内容并创建配置对象 + config.load_kube_config_from_dict(kubeconfig_dict) # 使用 config.load_kube_config_from_dict 创建 kubeconfig 配置对象 + elif token: + kubeconfig = client.Configuration() + kubeconfig.host = apiServer # APISERVER 地址 + kubeconfig.verify_ssl = False + kubeconfig.api_key = {"authorization": f"Bearer {token}"} + client.Configuration.set_default(kubeconfig) + else : + pass + try: + self.core_api = client.CoreV1Api() + print("API 接口验证成功.") + except Exception as e: + print(f"验证 API 接口失败: {str(e)}") + sys.exit("API 接口验证失败。程序退出。") + + + def update_app_services(self): + try: + # 获取所有 Pod + pods = self.core_api.list_pod_for_all_namespaces().items + pod_dict = {} + + # 记录命名空间、Pod 名称和 Pod IP 到字典中 + for pod in pods: + namespace = pod.metadata.namespace + pod_name = pod.metadata.name + pod_ip = pod.status.pod_ip + pod_dict[(namespace, pod_name)] = pod_ip + #print("pod_dict:",pod_dict) + # 遍历所有服务 + services = self.core_api.list_service_for_all_namespaces().items + for service in services: + svc_name = service.metadata.name + svc_namespace = service.metadata.namespace + svc_port = service.spec.ports[0].port + svc_cluster_ip = service.spec.cluster_ip + prometheus_url = f"http://{svc_cluster_ip}:{svc_port}/actuator/prometheus" + try: + response = requests.get(prometheus_url,timeout=3) + if response.status_code == 200 and "system_cpu_usage" in response.text: + # 使用标签选择器选择与Prometheus相关联的Pod + app_selector = f"app={svc_name}" + + # 获取与服务关联的所有Pod + pod_list = self.core_api.list_namespaced_pod(namespace=svc_namespace, label_selector=app_selector).items + + for pod in pod_list: + pod_name = pod.metadata.name + pod_ip = pod.status.pod_ip + if pod_ip: + service_data = { + "id": f"{pod_name}", + "name": "application", + "address": pod_ip, + "port": svc_port, + "checks": [{ + "http": prometheus_url, + "interval": "5s" + }] + } + + try: + response = 
requests.put(f"{self.consul_url}/v1/agent/service/register", json=service_data) + if response.status_code == 200: + print(f"prometheus: {prometheus_url} Pod {pod_name},注册成功。") + else: + print(f"无法注册服务 {svc_name}。状态码: {response.status_code}") + print(response.text) + except Exception as register_err: + print(f"注册服务到 Consul 时出错: {register_err}") + + except Exception as prometheus_err: + pass + #print(f"检查 Prometheus 端点时出错: {prometheus_err}") + + except Exception as e: + print("处理注册服务到 Consul 时出错: %s" % e) + + def update_node_exporter_pods(self): + namespace = "monitor" # 定义要获取的命名空间 + label_selector = "app=node-exporter" # 定义 label selector + + try: + # 调用 Kubernetes API 获取 Pod 列表 + api_response = self.core_api.list_namespaced_pod(namespace, label_selector=label_selector) + for pod in api_response.items: + # 定义服务注册的数据 + data = { + "id": f"{pod.spec.node_name}-{pod.metadata.name}", + "name": "node-exporter", + "address": f"{pod.status.pod_ip}", + "port": 9100, + "checks": [{ + "http": f"http://{pod.status.pod_ip}:9100/metrics", + "interval": "5s" + }] + } + # 发送 PUT 请求以注册服务 + response = requests.put(f"{self.consul_url}/v1/agent/service/register", json=data) + + # 检查响应状态 + if response.status_code == 200: + print(f"Pod 名称:{pod.metadata.name} IP:{pod.status.pod_ip} 节点:{pod.spec.node_name} 注册成功.") + + else: + print(f"无法注册服务 {pod.spec.node_name}. 
状态码: {response.status_code}") + print(response.text) + + except Exception as e: + print("获取 Pod 列表时出错: %s" % e) + + + + def clean_failed_instances(self): + time.sleep(3) + response = requests.get(f"{self.consul_url}/v1/health/state/critical") + if response.status_code == 200: + instances = response.json() + for instance in instances: + if instance['Status'] == 'critical': # 如果实例状态为严重 + service_id = instance['ServiceID'] + requests.put(f"{self.consul_url}/v1/agent/service/deregister/{service_id}") + print(f"失效实例ID: {service_id}") + else: + print(f"无法从 Consul API 获取数据。状态码:{response.status_code}") + + + +if __name__ == "__main__": + consul_url = "http://172.16.5.37:8500" + token = "xxxxxx" + apiServer = "https://46CA01C54B919FA35648DF454239A740.gr7.ap-northeast-1.eks.amazonaws.com" + k8s = KubernetesAPI(token=token,apiServer=apiServer,consul=consul_url) + k8s.update_app_services() + k8s.update_node_exporter_pods() + k8s.clean_failed_instances() \ No newline at end of file diff --git a/kubernets_api/consul.py b/kubernets_api/consul.py new file mode 100644 index 0000000..d5e85b7 --- /dev/null +++ b/kubernets_api/consul.py @@ -0,0 +1,157 @@ + +import os +import sys +import json +import time +import urllib3 +import requests +from kubernetes import client, config + +# 禁用 InsecureRequestWarning +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +class KubernetesAPI: + def __init__(self, kubeconfig=None,token=None,apiServer=None,consul=None): + self.consul_url = consul + if os.path.isfile(os.path.expanduser("~/.kube/config")): # 如果存在默认的 kubeconfig 文件,加载本地配置 + print("本地调用") + config.load_kube_config() + elif kubeconfig: + kubeconfig_dict = self.parse_kubeconfig(kubeconfig) # 解析 kubeconfig 内容并创建配置对象 + config.load_kube_config_from_dict(kubeconfig_dict) # 使用 config.load_kube_config_from_dict 创建 kubeconfig 配置对象 + elif token: + kubeconfig = client.Configuration() + kubeconfig.host = apiServer # APISERVER 地址 + kubeconfig.verify_ssl = False + kubeconfig.api_key = 
{"authorization": f"Bearer {token}"} + client.Configuration.set_default(kubeconfig) + else : + pass + try: + self.core_api = client.CoreV1Api() + print("API 接口验证成功.") + except Exception as e: + print(f"验证 API 接口失败: {str(e)}") + sys.exit("API 接口验证失败。程序退出。") + + + def update_app_services(self): + try: + # 获取所有 Pod + pods = self.core_api.list_pod_for_all_namespaces().items + pod_dict = {} + + # 记录命名空间、Pod 名称和 Pod IP 到字典中 + for pod in pods: + namespace = pod.metadata.namespace + pod_name = pod.metadata.name + pod_ip = pod.status.pod_ip + pod_dict[(namespace, pod_name)] = pod_ip + #print("pod_dict:",pod_dict) + # 遍历所有服务 + services = self.core_api.list_service_for_all_namespaces().items + for service in services: + svc_name = service.metadata.name + svc_namespace = service.metadata.namespace + svc_port = service.spec.ports[0].port + svc_cluster_ip = service.spec.cluster_ip + prometheus_url = f"http://{svc_cluster_ip}:{svc_port}/actuator/prometheus" + try: + response = requests.get(prometheus_url,timeout=3) + if response.status_code == 200 and "system_cpu_usage" in response.text: + # 使用标签选择器选择与Prometheus相关联的Pod + app_selector = f"app={svc_name}" + + # 获取与服务关联的所有Pod + pod_list = self.core_api.list_namespaced_pod(namespace=svc_namespace, label_selector=app_selector).items + + for pod in pod_list: + pod_name = pod.metadata.name + pod_ip = pod.status.pod_ip + if pod_ip: + service_data = { + "id": f"{pod_name}", + "name": "application", + "address": pod_ip, + "port": svc_port, + "checks": [{ + "http": prometheus_url, + "interval": "5s" + }] + } + + try: + response = requests.put(f"{self.consul_url}/v1/agent/service/register", json=service_data) + if response.status_code == 200: + print(f"prometheus: {prometheus_url} Pod {pod_name},注册成功。") + else: + print(f"无法注册服务 {svc_name}。状态码: {response.status_code}") + print(response.text) + except Exception as register_err: + print(f"注册服务到 Consul 时出错: {register_err}") + + except Exception as prometheus_err: + pass + #print(f"检查 Prometheus 
端点时出错: {prometheus_err}") + + except Exception as e: + print("处理注册服务到 Consul 时出错: %s" % e) + + def update_node_exporter_pods(self): + namespace = "monitor" # 定义要获取的命名空间 + label_selector = "app=node-exporter" # 定义 label selector + + try: + # 调用 Kubernetes API 获取 Pod 列表 + api_response = self.core_api.list_namespaced_pod(namespace, label_selector=label_selector) + for pod in api_response.items: + # 定义服务注册的数据 + data = { + "id": f"{pod.spec.node_name}-{pod.metadata.name}", + "name": "node-exporter", + "address": f"{pod.status.pod_ip}", + "port": 9100, + "checks": [{ + "http": f"http://{pod.status.pod_ip}:9100/metrics", + "interval": "5s" + }] + } + # 发送 PUT 请求以注册服务 + response = requests.put(f"{self.consul_url}/v1/agent/service/register", json=data) + + # 检查响应状态 + if response.status_code == 200: + print(f"Pod 名称:{pod.metadata.name} IP:{pod.status.pod_ip} 节点:{pod.spec.node_name} 注册成功.") + + else: + print(f"无法注册服务 {pod.spec.node_name}. 状态码: {response.status_code}") + print(response.text) + + except Exception as e: + print("获取 Pod 列表时出错: %s" % e) + + + + def clean_failed_instances(self): + time.sleep(3) + response = requests.get(f"{self.consul_url}/v1/health/state/critical") + if response.status_code == 200: + instances = response.json() + for instance in instances: + if instance['Status'] == 'critical': # 如果实例状态为严重 + service_id = instance['ServiceID'] + requests.put(f"{self.consul_url}/v1/agent/service/deregister/{service_id}") + print(f"失效实例ID: {service_id}") + else: + print(f"无法从 Consul API 获取数据。状态码:{response.status_code}") + + + +if __name__ == "__main__": + consul_url = "http://172.16.5.37:8500" + token = "eyJhbGciOiJSUzI1Nixxxx" + apiServer = "https://46CA01C54B919FA35648DF454239A740.gr7.ap-northeast-1.eks.amazonaws.com" + k8s = KubernetesAPI(token=token,apiServer=apiServer,consul=consul_url) + k8s.update_app_services() + k8s.update_node_exporter_pods() + k8s.clean_failed_instances() \ No newline at end of file diff --git a/kubernets_api/login.py 
b/kubernets_api/login.py new file mode 100644 index 0000000..c21e165 --- /dev/null +++ b/kubernets_api/login.py @@ -0,0 +1,58 @@ +import os +import json +import time +import re +from kubernetes import client, config, stream +import urllib3 +import hashlib +import yaml + + +# 禁用 InsecureRequestWarning +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +class KubernetsAPI: + def __init__(self, kubeconfig=None,token=None,apiServer=None): + if os.path.isfile(os.path.expanduser("~/.kube/config")): # 如果存在默认的 kubeconfig 文件,加载本地配置 + print("本地调用") + config.load_kube_config() + elif kubeconfig: + kubeconfig_dict = self.parse_kubeconfig(kubeconfig) # 解析 kubeconfig 内容并创建配置对象 + config.load_kube_config_from_dict(kubeconfig_dict) # 使用 config.load_kube_config_from_dict 创建 kubeconfig 配置对象 + elif token: + kubeconfig = client.Configuration() + kubeconfig.host = apiServer # APISERVER 地址 + kubeconfig.verify_ssl = False + kubeconfig.api_key = {"authorization": f"Bearer {token}"} + client.Configuration.set_default(kubeconfig) + else : + pass + + + def parse_kubeconfig(self,kubeconfig_content): + try: + kubeconfig_dict = yaml.safe_load(kubeconfig_content) + return kubeconfig_dict + except yaml.YAMLError as e: + raise Exception(f"Error parsing kubeconfig content: {str(e)}") + + def verify_login(self): + try: + + self.core_api = client.CoreV1Api() + self.apps_api = client.AppsV1Api() + self.core_api.list_namespace() + print("api接口调用验证成功.") + return True + except Exception as e: + print(f"api接口调用验证失败.: {str(e)}") + return False + + +if __name__ == "__main__": + K8s = KubernetsAPI() + K8s.verify_login() + + + # pod_list = self.core_api.list_pod_for_all_namespaces(watch=False) + # print(pod_list) diff --git a/kubernets_api/pod.py b/kubernets_api/pod.py new file mode 100644 index 0000000..c4c69f6 --- /dev/null +++ b/kubernets_api/pod.py @@ -0,0 +1,70 @@ +import os +import sys +import json +import time +import re +from kubernetes import client, config, stream +import urllib3 
+import hashlib +import yaml + + +# 禁用 InsecureRequestWarning +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +class KubernetsAPI: + def __init__(self, kubeconfig=None,token=None,apiServer=None): + if os.path.isfile(os.path.expanduser("~/.kube/config")): # 如果存在默认的 kubeconfig 文件,加载本地配置 + print("本地调用") + config.load_kube_config() + elif kubeconfig: + kubeconfig_dict = self.parse_kubeconfig(kubeconfig) # 解析 kubeconfig 内容并创建配置对象 + config.load_kube_config_from_dict(kubeconfig_dict) # 使用 config.load_kube_config_from_dict 创建 kubeconfig 配置对象 + elif token: + kubeconfig = client.Configuration() + kubeconfig.host = apiServer # APISERVER 地址 + kubeconfig.verify_ssl = False + kubeconfig.api_key = {"authorization": f"Bearer {token}"} + client.Configuration.set_default(kubeconfig) + else : + pass + + try: + self.core_api = client.CoreV1Api() + self.apps_api = client.AppsV1Api() + print("api接口调用验证成功.") + except Exception as e: + print(f"api接口调用验证失败.: {str(e)}") + sys.exit("API接口调用验证失败.程序退出.") + + + def parse_kubeconfig(self,kubeconfig_content): + try: + kubeconfig_dict = yaml.safe_load(kubeconfig_content) + return kubeconfig_dict + except yaml.YAMLError as e: + raise Exception(f"Error parsing kubeconfig content: {str(e)}") + + + def get_node_exporter_pods(self): + # 定义要获取的命名空间 + namespace = "monitor" + # 定义 label selector + label_selector = "app=node-exporter" + + try: + # 调用 Kubernetes API 获取 Pod 列表 + api_response = self.core_api.list_namespaced_pod(namespace, label_selector=label_selector) + for pod in api_response.items: + print("Pod 名称: %s \t IP: %s \t 节点: %s" % (pod.metadata.name, pod.status.pod_ip, pod.spec.node_name)) + + except Exception as e: + print("获取 Pod 列表时出错: %s" % e) + +if __name__ == "__main__": + K8s = KubernetsAPI() + K8s.get_node_exporter_pods() + + + # pod_list = self.core_api.list_pod_for_all_namespaces(watch=False) + # print(pod_list) diff --git a/other/.DS_Store b/other/.DS_Store new file mode 100644 index 0000000..be530da Binary files 
/dev/null and b/other/.DS_Store differ diff --git a/other/configmap_and_secret.md b/other/configmap_and_secret.md new file mode 100644 index 0000000..f4bc92f --- /dev/null +++ b/other/configmap_and_secret.md @@ -0,0 +1,24 @@ +# configmap & secret + +```bash + +## 创建configmap +$ kubectl create -f cmdb-configmap.yml +## 查看configmap +$ kubectl -n test get cm +## 查看详情 +kubectl -n test describe cm mysql-cmdb + +## 创建configmap +$ kubectl apply -f cmdb-secret.yml + +## 查看secret信息 +$ kubectl -n test get secret +$ kubectl -n test describe secret mysql-cmdb + +## 创建和查看SSL证书 +$ openssl req -x509 -nodes -days 86400 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=*.pod.opsbase.cn/O=ingress-nginx" +$ kubectl -n test create secret tls tls-pod.opsbase.cn --key tls.key --cert tls.crt +$ kubectl -n test describe secret tls-pod.opsbase.cn +$ kubectl apply -f myblog.yml +``` diff --git a/other/demo-cmdb部署.md b/other/demo-cmdb部署.md new file mode 100644 index 0000000..db1f44a --- /dev/null +++ b/other/demo-cmdb部署.md @@ -0,0 +1,120 @@ +# 部署管理服务 +## 镜像 + +```test + +lghost/cmdb + or +harbor.opsbase.cn/public/cmdb:latest +``` + +## Demo +```yml +apiVersion: v1 +kind: Pod +metadata: + name: cmdb + namespace: test + labels: + component: cmdb +spec: + containers: + - name: cmdb + image: harbor.opsbase.cn/public/cmdb:latest + env: + - name: MYSQL_HOST # 指定root用户的用户名 + value: "127.0.0.1" + - name: MYSQL_PASSWD + value: "123456" + ports: + - containerPort: 8000 + - name: mysql + image: mysql:5.7 + args: + - --character-set-server=utf8mb4 + - --collation-server=utf8mb4_unicode_ci + ports: + - containerPort: 3306 + env: + - name: MYSQL_ROOT_PASSWORD + value: "123456" + - name: MYSQL_DATABASE + value: "cmdb" +``` +## 创建和访问Pod + +```text +# 创建namespace, namespace是逻辑上的资源池 +kubectl create namespace test + +# 使用指定文件创建Pod +kubectl create -f pod.yml + +# 清空pod +kubectl delete pod --all -n test + +# 查看pod,可以简写po +kubectl -n test get pod +kubectl -n test get po -o wide + +# 进入pod +kubectl -n test 
exec -it cmdb -c cmdb sh +# 初始化 +kubectl -n test exec -it cmdb -c cmdb -- python init.py + +# 进入db +kubectl -n test exec -it cmdb -c mysql bash +> mysql -p +> show databases; +``` + +### curl 测试接口 +```bash +curl -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "username":"admin", + "password": "123456" + }' \ + http://10.244.2.10:8000/api/oauth/login/ +``` + +### Infra容器 +```bash +docker ps -a |grep cmdb +# 在n容器node节点上 发现有三个容器 + +kubectl -n test exec -it cmdb -c cmdb -- ifconfig +kubectl -n test exec -it cmdb -c mysql -- sh +``` +>为了实现Pod内部的容器可以通过localhost通信,每个Pod都会启动Infra容器,然后Pod内部的其他容器的网络空间会共享该Infra容器的网络空间(Docker网络的container模式),Infra容器只需要hang住网络空间,不需要额外的功能,因此资源消耗极低。 + +pod容器命名: k8s____ + +### 查看pod详细信息 +``` +# 查看pod调度节点及pod_ip +kubectl -n test get pods -o wide +# 查看完整的yaml +kubectl -n test get po cmdb -o yaml +# 查看pod的明细信息及事件 +kubectl -n test describe pod cmdb +``` + +### 更新服务版本 +``` +kubectl apply -f demo-pod.yaml +``` +### 删除Pod服务 +``` +# 根据文件删除 +kubectl delete -f demo-pod.yaml + +# 根据pod_name删除 +kubectl -n delete pod +``` + +## mysql + app 拆分 + +kubectl delete pod --all -n test +kubectl create -f Deployment/two-pod/mysql.yml diff --git a/other/demo-nginx部署.md b/other/demo-nginx部署.md new file mode 100644 index 0000000..c9b5f8c --- /dev/null +++ b/other/demo-nginx部署.md @@ -0,0 +1,13 @@ + +# nginx + +```bash +kubectl create deployment nginx --image=nginx:latest -o yaml --dry-run >> nginx.yml +# 生成配置文件 + +## 启动pod +kubectl create -f nginx.yml + +## 挂载存储目录 +kubectl apply -f storage/nginx-nfs.yml +``` \ No newline at end of file diff --git a/other/demo-存储.md b/other/demo-存储.md new file mode 100644 index 0000000..55ae5ea --- /dev/null +++ b/other/demo-存储.md @@ -0,0 +1,51 @@ +## nfs + + +### 配置 + + +```bash +## 安装依赖 +$ yum install -y nfs-utils rpcbind + +## 修改配置 +$ cat /etc/exports +/opt/data/nfs 66.94.121.23/32(ro,sync,no_root_squash) 66.94.125.0/24(ro,sync,no_root_squash) + +## 启动服务 +$ systemctl enable rpcbind && systemctl enable nfs +$ systemctl restart 
rpcbind && systemctl restart nfs + +## 查看nfs +$ systemctl status nfs +$ showmount -e +Export list for test-opsbase-k8s-66-94-121-23: +/opt/data/nfs 66.94.125.0/24,66.94.121.23/32 + +## 挂载nfs +$ mount -t nfs -o rw,async 66.94.121.23:/opt/data/nfs /data/nfs/ +``` + +### nfs.yml + +```bash +## 管理员创建资源 +$ kubectl create -f nfs-pv.yml + +## 用户申请pv资源 +$ kubectl create -f nfs-pvc.yml + +## 查看pv/pvc +$ kubectl get pv,pvc -n test + +## pod 挂载pvc资源 +$ kubectl apply -f storage/nginx-nfs.yml + +$ kubectl -n test exec -it nginx-nfs-test-xxx -- df|grep -A 1 nfs +66.94.121.23:/opt/data/nfs + 411706368 58967040 331802624 15% /usr/share/nginx/html + +## 测试 +$ echo 'ok' > /opt/data/nfs/index.html +$ curl 10.244.2.227 # nginx pod +``` \ No newline at end of file diff --git a/other/deployment.md b/other/deployment.md new file mode 100644 index 0000000..790cd7c --- /dev/null +++ b/other/deployment.md @@ -0,0 +1,11 @@ +# deployment + +## 常用命令 + +```bash +kubectl -n test get deploy +kubectl -n test describe deploy cmdb + +## 一般重启deployment,常规操作是删掉对应的pod, 但如果有多个副本集的话一个个删很麻烦,可以执行 rollout +kubectl rollout restart deploy cmdb -n test +``` diff --git a/other/image/jenkins1.png b/other/image/jenkins1.png new file mode 100644 index 0000000..d0b17bd Binary files /dev/null and b/other/image/jenkins1.png differ diff --git a/other/image/jenkins2.png b/other/image/jenkins2.png new file mode 100644 index 0000000..5a43007 Binary files /dev/null and b/other/image/jenkins2.png differ diff --git a/other/image/jenkins3.png b/other/image/jenkins3.png new file mode 100644 index 0000000..9cebc54 Binary files /dev/null and b/other/image/jenkins3.png differ diff --git a/other/ingress-nginx.md b/other/ingress-nginx.md new file mode 100644 index 0000000..c85ccd3 --- /dev/null +++ b/other/ingress-nginx.md @@ -0,0 +1,24 @@ +# ingress-nginx + + +```bash + +## 打上标签 +$ kubectl label node k8s-m1 ingress=true +$ kubectl get node --show-labels + +## 启动ingress-nginx +$ kubectl apply -f mandatory.yml + +## 查看pod +$ 
kubectl -n ingress-nginx get pod,services -o wide + +## 查看Pending创建pod失败原因; 检查label或端口占用 +$ kubectl -n ingress-nginx describe pod + +$ kubectl -n test get ing + +## curl 调试地址 +kubectl get pod -A -owide|grep nginx # 获取 ingress-nginx 地址 +curl -skL -HHost:myblog.pod.opsbase.cn 66.94.11.11/admin -v +``` \ No newline at end of file diff --git a/other/jenkins-部署.md b/other/jenkins-部署.md new file mode 100644 index 0000000..ed35a57 --- /dev/null +++ b/other/jenkins-部署.md @@ -0,0 +1,18 @@ +# jenkins + + +```bash +## 部署 +kubectl apply -f jenkins-all.yaml + +## 查看状态 +kubectl -n jenkins get deploy,po,svc,pv,ingress + +## 解锁 Jenkins +kubectl -n jenkins exec jenkins-master-xxxxxxx -- cat /var/jenkins_home/secrets/initialAdminPassword + +## 进入pod +kubectl -n jenkins exec -it jenkins-master-6884cfb558-d2f82 -- sh + +kubectl get po -A -owide|grep jnlp +``` \ No newline at end of file diff --git a/other/k8s常用命令汇总.md b/other/k8s常用命令汇总.md new file mode 100644 index 0000000..ef2e4cf --- /dev/null +++ b/other/k8s常用命令汇总.md @@ -0,0 +1,121 @@ +# K8s + +## kubectl + +```bash +###################### 查看信息 ###################### +kubectl get pod -n kube-system -owide # 查看默认命名空间pod +kubectl --kubeconfig config-test get pod -A # 指定配置文件,查看所有pod + +# 查看kube系统组件状态 +kubectl get nodes # 查看node状态 +kubectl get namespace # 命名空间 +kubectl get svc # 查看services +kubectl get deployment,pod,service -n test # 查看test命名空间 deployment,pod,service +kubectl get endpoints # 获取endpoints列表 + +kubectl get pod nginx-lb-b99649675-9hkb5 -o yaml # pod详细信息 +kubectl describe node xxx # 显示 node 的详细信息 +kubectl -n test describe pod xxx # 显示 pod 的详细信息 +kubectl describe svc -n kube-system # 查看svc网路明细 + + +###################### 创建pod ###################### +kubectl create namespace test # 创建命名空间 +kubectl create -f Deployment/nginx.yml # 部署nginx +kubectl get deployment -n test -owide # 查看test命名空间 deployment +kubectl -n test scale deployment/nginx-deployment --replicas=1 # 调整Deployment副本数,replicas=0 重启 +kubectl create deployment 
nginx --image nginx --port 80 --replicas=1 # 创建 nginx pod + +kubectl -n test expose deployment nginx-deployment --type=NodePort --port=80 # 添加NodePort +# kubectl port-forward service/nginx-lb 80:8080 + +###################### 查看pod日志 ###################### +kubectl -n test logs -f nginx-deployment-585449566-vglh4 # 查看log + +###################### 进入pod ###################### +kubectl -n test exec -it nginx-deployment-585449566-vglh4 -- bash + +###################### 释放pod ###################### +kubectl delete pod --all # 删除所有 Pod +kubectl delete service nginx-lb # 删除service +kubectl delete deployment,pod,service web1 # 删除deployment,pod,service +kubectl delete pod test-web --force --grace-period=0 # 强制删除,设置优雅关闭时间为0 + +kubectl delete -f recommended.yaml # 基于yml删除 +kubectl delete pod,service --all -n kubernetes-dashboard # 清理指定命名空间po,svc +###################### app部署管理 ###################### +kubectl set image deployment/{应用名称} {容器名称}={镜像库地址} -n {命名空间} +# 更新镜像版本 +kubectl cordon node主机 +# 将k8s-node1节点设置为不可调度模式 +kubectl drain node主机 +# 将当前运行在k8s-node1节点上的容器驱离 +kubectl uncordon node主机 +# #执行完维护后,将节点重新加入调度 + +kubectl describe node k8s-m1| grep Taints +# 查看master only状态 +kubectl taint nodes k8s-m1 node-role.kubernetes.io/master- +# 希望master当node使用 +kubectl taint nodes k8s-m1 node-role.kubernetes.io/master=:NoSchedule +# 还原taint +###################### labels标签管理 ###################### +kubectl get node --show-labels # 查看节点 labels +kubectl -n test get pod --show-labels # 查看pod labels + +kubectl delete pod,svc -l name= # 删除所有包含某个 label 的pod 和 service + +kubectl label nodes node主机 uname=snake # 节点设置标签 +kubectl label nodes node主机 uname- # 节点删除标签 +kubectl label nodes k8s-s1 standalone=true # 节点设置标签... 
+ + + + +###################### create 和 apply ###################### +kubectl create -f pod.yaml +kubectl apply -f pod.yaml +# apply 可以重复执行,create 不行; +# 如果在单个文件上运行操作已创建资源,则create和apply基本相同。 但是, apply允许您在目录下的多个文件上同时创建和修补。 + +###################### yaml ###################### +kubectl create deployment web --image=nginx -o yaml --dry-run >> nginx.yml +# 生成yaml配置文件 + +kubectl expose deployment web --port=80 --type=NodePort --target-port=80 --name=web1 -o yaml >> nginx-web1.yaml +# 对外暴露访问端口,生成yaml配置文件 + +kubectl edit deployment/nginx-deployment -n test # 编辑部署文件 + +###################### istio ###################### + +kubectl get virtualservice # 查看虚拟服务 +kubectl get destinationrule # 查看虚拟服务详情 +kubectl describe virtualservice # 自动注入代理边车 +kubectl label namespace istio-injection=enabled + + +###################### more ###################### + +# cp文件 +kubectl -n test cp busybox-x86_64 cmdb:/home/app/busybox +# 重启 pod +kubectl get pod -n -o yaml | kubectl replace --force -f - +# 修改网络类型 +kubectl patch service istio-ingressgateway -n istio-system -p '{"spec":{"type":"NodePort"}}' +# 查看前一个 pod 的日志,logs -p 选项 +kubectl logs --tail 100 -p user-klvchen-v1.0-6f67dcc46b-5b4qb > pre.log + +###################### docker ###################### +# docker资源限制 +--cpuset-cpus=0 多核CPU下绑定cpu核心标记 0-3范围 0,3多个 +--cpu-shares=1024 繁忙时段所占CPU比例 +-m 限制内存 -m 512m 限制512m内存 + +cpu-period: 设定docker运行容器重新分配CPU时间的周期(单位微秒) 1000微秒=1毫秒 最大值1000000=1秒 +cpu-quota: 在设置周期内分配给当前容器的CPU执行时间 + +# 更新运行中的docker限制 +docker update -m 4000m --memory-swap -1 ${name} +``` diff --git a/other/kubernetes-dashboard.md b/other/kubernetes-dashboard.md new file mode 100644 index 0000000..8bdfe3f --- /dev/null +++ b/other/kubernetes-dashboard.md @@ -0,0 +1,48 @@ + +# kubernetes-dashboard + +```bash + +wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.2.0/aio/deploy/recommended.yaml + +kubectl apply -f recommended.yaml + + +kubectl delete -f recommended.yaml +kubectl delete pod,service --all -n kubernetes-dashboard 
+```
+
+## 使用NodePort端口
+
+
+```
+......
+spec:
+  ports:
+  - port: 443
+    targetPort: 8443
+  selector:
+    k8s-app: kubernetes-dashboard
+  type: NodePort # 加上type=NodePort变成NodePort类型的服务
+......
+```
+
+## 查看状态
+```
+kubectl -n kubernetes-dashboard get svc,pod
+
+kubectl cluster-info
+```
+
+```
+kubectl apply -f dashboard-admin.conf
+kubectl -n kubernetes-dashboard get secret |grep admin-token
+admin-token-fqdpf kubernetes.io/service-account-token 3 7m17s
+
+# 使用该命令拿到admin-token-xxxx,然后粘贴
+kubectl -n kubernetes-dashboard get secret admin-token-fqdpf -o jsonpath={.data.token}|base64 -d
+eyJhbGciOiJSUzI1NiIsImtpZCI6Ik1rb2xHWHMwbWFPMjJaRzhleGRqaExnVi1BLVNRc2txaEhETmVpRzlDeDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi10b2tlbi1mcWRwZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbiIsImt1YmVy......
+```
+
+
+https://66.94.121.23:30571/
\ No newline at end of file
diff --git a/other/调度md b/other/调度md
new file mode 100644
index 0000000..d67acc6
--- /dev/null
+++ b/other/调度md
@@ -0,0 +1,9 @@
+
+# 调度
+
+
+## watch方式监听etcd,pod无状态多副本
+
+```bash
+
+```
\ No newline at end of file
diff --git a/other/面试题...md b/other/面试题...md
new file mode 100644
index 0000000..8afc6e3
--- /dev/null
+++ b/other/面试题...md
@@ -0,0 +1,5 @@
+# namespace
+
+# cgroup
+
+# kube-proxy
\ No newline at end of file