master
Logan.Li 2025-09-06 12:18:35 +08:00
parent 79610d9ece
commit 56a63a5577
35 changed files with 1135 additions and 158 deletions

View File

@@ -1,7 +1,7 @@
<!--
* @Author: admin@attacker.club
* @Date: 2022-12-10 22:27:24
* @LastEditTime: 2023-12-14 17:12:13
* @LastEditTime: 2025-06-03 10:03:56
* @Description:
-->
@@ -13,7 +13,7 @@
# wget -O /usr/local/sbin/docker-compose https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)
curl -L https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/sbin/docker-compose
chmod +x /usr/local/sbin/docker-compose
docker-compose version # check the docker-compose version
```
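To confirm the binary works end to end, a minimal hedged smoke test (the compose file and image below are placeholders, not part of this repo):

```bash
# Throwaway compose file; nginx stands in for a real service
cat > docker-compose.yml <<'EOF'
services:
  web:
    image: nginx:alpine
    ports:
      - "8080:80"
EOF
docker-compose up -d
curl -I http://localhost:8080   # expect HTTP/1.1 200 OK
docker-compose down
```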

View File

@@ -17,4 +17,5 @@ docker cp mysql-connector-java-5.1.48-bin.jar confluence:/opt/atlassian/confluen
# copy the database driver into the container
docker exec -it confluence java -jar /opt/atlassian/confluence/atlassian-agent.jar -p conf -m pp@pangshare.com -n pangshare -o https://www.pangshare.com -s B37H-XJIY-BCSR-FZQQ
#

View File

@@ -0,0 +1,16 @@
FROM golang:1.22.5 AS builder
COPY . /app
WORKDIR /app
RUN GOPROXY='https://goproxy.io,direct' CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
go build -o app main.go
FROM alpine:3.10
ENV TZ Asia/Shanghai
# RUN apk add --no-cache tzdata && \
# ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
RUN apk add --no-cache tzdata
COPY --from=builder /app /
EXPOSE 8080
CMD ["./app"]

View File

@@ -0,0 +1,57 @@
# Stage 1: build
FROM node:20.6.0 AS builder
# Enable Corepack and activate pnpm
RUN corepack enable && corepack prepare pnpm@latest --activate
WORKDIR /app
# Copy dependency manifests
COPY package.json pnpm-lock.yaml* ./
# Create the public/tradingview directory (ensure the path exists)
RUN mkdir -p /app/public/tradingview
# Copy the public directory
COPY public /app/public
# Install project dependencies (including dev dependencies)
RUN pnpm install
# Copy the source code
COPY . .
# Build the project
RUN pnpm run build
# Stage 2: runtime
FROM node:20.6-alpine AS runtime
# Install pnpm
RUN npm install -g pnpm
WORKDIR /app
# Copy required files from the build stage (public/tradingview first)
COPY --from=builder /app/public ./public
# Copy dependency manifests
COPY --from=builder /app/package.json .
COPY --from=builder /app/pnpm-lock.yaml .
# Pre-fetch production dependencies (public/tradingview already exists at this point)
RUN pnpm fetch --prod
# Copy the remaining files
COPY --from=builder /app/server.js .
COPY --from=builder /app/next.config.mjs .
COPY --from=builder /app/.next ./.next
# Install production dependencies offline
RUN pnpm install --prod --offline --no-frozen-lockfile
# Expose the port
EXPOSE 3000
# Container start command
CMD ["pnpm", "run", "start"]

View File

@@ -6,6 +6,7 @@ token=************
dashboard_port = 7500
dashboard_user = root
dashboard_pwd = password1
vhost_http_port = 80
vhost_https_port = 443
tcp_mux = true
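For reference, a hedged sketch of starting the server side with this config (the frps binary path is assumed):

```bash
./frps -c ./frps.ini
# dashboard: http://<server-ip>:7500 (root / password1, per the config above)
```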

View File

@@ -1,4 +1,12 @@
#!/bin/bash
###
# @Author: Logan.Li
# @Gitee: https://gitee.com/attacker
# @email: admin@attacker.club
# @Date: 2024-12-29 15:32:30
# @LastEditTime: 2025-05-20 23:44:55
# @Description:
###
docker stop gitlab
docker rm gitlab

View File

@@ -4,7 +4,19 @@ docker run -d \
# @Gitee: https://gitee.com/attacker
# @email: admin@attacker.club
# @Date: 2025-01-03 12:57:46
# @LastEditTime: 2025-01-03 12:58:31
# @LastEditTime: 2025-01-18 23:41:38
# @Description:
###
docker run -d --name=grafana --restart unless-stopped -p 3000:3000 grafana/grafana
# https://grafana.com/docs/grafana/latest/setup-grafana/installation/docker
##
# grafana:9.4.0
# docker run -d --name=grafana --restart unless-stopped --volume /Users/loganli/docker_data:/var/lib/grafana -p 3000:3000 grafana/grafana-oss
#
# grafana/grafana-enterprise: enterprise edition
# grafana/grafana-oss: open-source edition
mkdir data
docker run -d -p 3000:3000 --name=grafana \
--restart unless-stopped \
--user "$(id -u)" \
--volume "$PWD/data:/var/lib/grafana" \
grafana/grafana-oss
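A quick health check once the container is up (Grafana's /api/health endpoint; the default login is admin / admin):

```bash
curl -s http://localhost:3000/api/health
# {"database": "ok", ...} indicates the instance is ready
```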

2.docker/nginx-ui.sh Normal file
View File

@@ -0,0 +1,11 @@
docker run -dit \
--name=nginx-ui \
--restart=always \
-e TZ=Asia/Shanghai \
-v /opt/appdata/nginx:/etc/nginx \
-v /opt/appdata/nginx-ui:/etc/nginx-ui \
-v /var/run/docker.sock:/var/run/docker.sock \
-p 80:80 -p 443:443 \
uozi/nginx-ui:latest

2.docker/ssr.sh Normal file
View File

@@ -0,0 +1,13 @@
###
# @Author: Logan.Li
# @Gitee: https://gitee.com/attacker
# @email: admin@attacker.club
# @Date: 2025-03-12 20:17:36
# @LastEditTime: 2025-05-20 23:43:08
# @Description:
###
docker run -d --name ss-server -p 53024:53024 -p 53024:53024/udp \
-e PASSWORD="nwlHBisiJeNqYHzeRa" \
-e METHOD="chacha20-ietf-poly1305" \
shadowsocks/shadowsocks-libev

3.kubernetes/secret/1.sh Normal file
View File

@@ -0,0 +1,26 @@
#!/bin/bash
###
# @Author: Logan.Li
# @Gitee: https://gitee.com/attacker
# @email: admin@attacker.club
# @Date: 2025-03-12 20:26:34
# @LastEditTime: 2025-03-12 20:52:14
# @Description:
###
# Emit a single line so stray newlines do not corrupt the encoding
cat /opt/www/pokerapi/storage/app/secret/private_key.pem | base64 -w 0
# Mount it into the target container (container spec fragment below)
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
volumeMounts:
- name: private-key-volume
mountPath: /opt/www/pokerapi/storage/app/secret/private_key.pem
subPath: private_key.pem
volumes:
- name: private-key-volume
secret:
secretName: private-key-secret
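The Secret referenced above is not shown; a hedged sketch of creating it from the PEM (namespace flag omitted, add -n as needed):

```bash
kubectl create secret generic private-key-secret \
  --from-file=private_key.pem=/opt/www/pokerapi/storage/app/secret/private_key.pem
```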

View File

@@ -3,21 +3,20 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: consul-data-pvc
namespace: monitor
namespace: prometheus
spec:
accessModes:
- ReadWriteMany
storageClassName: nfs-provisioner # an NFS StorageClass you must define in advance
- ReadWriteMany
storageClassName: nfs-provisioner # an NFS StorageClass you must define in advance
resources:
requests:
storage: 10Gi # adjust to your storage needs
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: consul
namespace: monitor
namespace: prometheus
spec:
replicas: 1 # adjust as needed
selector:
@@ -29,16 +28,16 @@ spec:
app: consul
spec:
containers:
- name: consul
image: consul:1.15
ports:
- containerPort: 8500
volumeMounts:
- name: consul-data
mountPath: /consul/data
volumes:
- name: consul
image: consul:1.15
ports:
- containerPort: 8500
volumeMounts:
- name: consul-data
persistentVolumeClaim:
claimName: consul-data-pvc
# namespace: monitor
mountPath: /consul/data
volumes:
- name: consul-data
persistentVolumeClaim:
claimName: consul-data-pvc
# namespace: prometheus
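A hedged apply-and-verify sketch (manifest filename assumed; there is no Service here, so port-forward targets the Deployment directly):

```bash
kubectl apply -f consul.yaml
kubectl -n prometheus get pods -l app=consul
kubectl -n prometheus port-forward deploy/consul 8500:8500 &
curl http://127.0.0.1:8500/v1/status/leader   # a non-empty address means a leader was elected
```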

View File

@@ -1,66 +0,0 @@
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
app: grafana
name: grafana
namespace: monitor
spec:
replicas: 1
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
spec:
nodeSelector:
node-type: grafana
securityContext:
runAsNonRoot: true
runAsUser: 10555
fsGroup: 10555
containers:
- name: grafana
image: grafana/grafana:latest
imagePullPolicy: IfNotPresent
env:
- name: GF_AUTH_BASIC_ENABLED
value: "true"
- name: GF_AUTH_ANONYMOUS_ENABLED
value: "false"
readinessProbe:
httpGet:
path: /login
port: 3000
volumeMounts:
- mountPath: /var/lib/grafana
name: monitor-data
ports:
- containerPort: 3000
protocol: TCP
volumes:
- name: monitor-data
persistentVolumeClaim:
claimName: grafana-data-pvc
# emptyDir: {}
# hostPath:
# path: /data/grafana
# type: DirectoryOrCreate
---
kind: Service
apiVersion: v1
metadata:
labels:
app: grafana
name: grafana-service
namespace: monitor
spec:
ports:
- port: 3000
targetPort: 3000
selector:
app: grafana

View File

@@ -0,0 +1,58 @@
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
app: grafana
name: grafana
namespace: prometheus
spec:
replicas: 1
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
spec:
securityContext:
runAsNonRoot: true
runAsUser: 10555
fsGroup: 10555
containers:
- name: grafana
image: grafana/grafana:latest
imagePullPolicy: IfNotPresent
env:
- name: GF_AUTH_BASIC_ENABLED
value: "true"
- name: GF_AUTH_ANONYMOUS_ENABLED
value: "false"
readinessProbe:
httpGet:
path: /login
port: 3000
volumeMounts:
- mountPath: /var/lib/grafana
name: monitor-data
ports:
- containerPort: 3000
protocol: TCP
volumes:
- name: monitor-data
persistentVolumeClaim:
claimName: monitor-data
---
kind: Service
apiVersion: v1
metadata:
labels:
app: grafana
name: grafana-service
namespace: prometheus
spec:
ports:
- port: 3000
targetPort: 3000
selector:
app: grafana

View File

@@ -2,7 +2,7 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
name: node-exporter
namespace: monitor
namespace: prometheus
labels:
app: node-exporter
spec:

View File

@@ -2,44 +2,43 @@ apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: monitor
  namespace: prometheus # the namespace Prometheus is deployed in
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups:
- ""
# Core resource-discovery permissions
- apiGroups: [ "" ]
resources:
- nodes
- nodes/metrics
- nodes/proxy
- services
- endpoints
- pods
- nodes/proxy
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
- nodes/metrics
verbs:
- get
- nonResourceURLs:
- /metrics
verbs:
- get
- namespaces # key: allows discovery across all namespaces
verbs: [ "get", "list", "watch" ]
# Ingress monitoring permissions (covers both the legacy and current API groups)
- apiGroups: [ "extensions", "networking.k8s.io" ]
resources: [ "ingresses" ]
verbs: [ "get", "list", "watch" ]
# Prometheus Operator CRD permissions (if needed)
- apiGroups: [ "monitoring.coreos.com" ]
resources:
- servicemonitors
- podmonitors
- prometheuses
- alertmanagers
verbs: [ "get", "list", "watch" ]
# Non-resource permissions (e.g. the /metrics endpoint)
- nonResourceURLs: [ "/metrics" ]
verbs: [ "get" ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@@ -52,4 +51,4 @@ roleRef:
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: monitor
  namespace: prometheus # must match the ServiceAccount namespace
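To sanity-check the bindings, kubectl auth can-i can impersonate the ServiceAccount; a hedged sketch:

```bash
kubectl auth can-i list pods --all-namespaces \
  --as=system:serviceaccount:prometheus:prometheus
kubectl auth can-i get ingresses.networking.k8s.io \
  --as=system:serviceaccount:prometheus:prometheus
```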

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-config
namespace: monitor
namespace: prometheus
data:
prometheus.yml: |
global:

View File

@@ -2,7 +2,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus
namespace: monitor
namespace: prometheus
labels:
app: prometheus
spec:
@@ -14,7 +14,8 @@
labels:
app: prometheus
spec:
securityContext: # run as root
securityContext:
# run the container as root
runAsUser: 0
serviceAccountName: prometheus
containers:
@@ -22,10 +23,10 @@
name: prometheus
args:
- "--config.file=/etc/prometheus/prometheus.yml" # prometheus.yml is mounted via a volume
- "--storage.tsdb.path=/prometheus" # the /prometheus directory is mounted via a volume
- "--storage.tsdb.path=/prometheus" # the /prometheus directory is mounted via a volume
- "--storage.tsdb.retention.time=24h"
- "--web.enable-admin-api" # enables the admin HTTP API, including features such as deleting time series
- "--web.enable-lifecycle" # enables hot reload: hit localhost:9090/-/reload to apply config changes immediately
- "--web.enable-admin-api" # enables the admin HTTP API, including features such as deleting time series
- "--web.enable-lifecycle" # enables hot reload: hit localhost:9090/-/reload to apply config changes immediately
ports:
- containerPort: 9090
name: http

View File

@@ -1,14 +0,0 @@
# Repository setup
```bash
## Add repositories
helm repo add harbor https://helm.goharbor.io
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo add microsoft http://mirror.azure.cn/kubernetes/charts
## Update repositories
helm repo update
## Remove a repository
helm repo remove xxxx
```

View File

@@ -0,0 +1,29 @@
<!--
* @Author: Logan.Li
* @Gitee: https://gitee.com/attacker
* @email: admin@attacker.club
* @Date: 2024-12-29 15:32:30
* @LastEditTime: 2025-03-30 02:32:03
* @Description:
-->
# Repository setup
```bash
# Install helm
wget https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
helm version
## Add repositories
helm repo add harbor https://helm.goharbor.io
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo add microsoft http://mirror.azure.cn/kubernetes/charts
## Update repositories
helm repo update
## Remove a repository
helm repo remove xxxx
```
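Typical follow-up once the repos are added; the release and namespace names below are examples, not fixed by this doc:

```bash
helm search repo harbor
helm install harbor harbor/harbor -n harbor --create-namespace
helm list -n harbor
```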

View File

@@ -1,12 +1,5 @@
#!/usr/bin/env bash
###
# @Author: Logan.Li
# @Gitee: https://gitee.com/attacker
# @email: admin@attacker.club
# @Date: 2023-10-28 00:01:57
# @LastEditTime: 2023-10-28 01:30:56
# @Description:
###
CURRENT_DIR=$(
cd "$(dirname "$0")"
@@ -52,9 +45,9 @@ fi
>${CURRENT_DIR}/install.log
logger info "======================= Starting installation =======================" 2>&1 | tee -a ${CURRENT_DIR}/install.log
logger info "Checking for offline package [offline.tar.gz]"
if [ -f ${CURRENT_DIR}/offline.tar.gz ]; then
tar zxf offline.tar.gz
logger info "Checking for offline package [docker_offline.tar.gz]"
if [ -f ${CURRENT_DIR}/docker_offline.tar.gz ]; then
tar zxf docker_offline.tar.gz
chmod +x docker-install.sh && ./docker-install.sh
logger success "Offline docker installed successfully"
\cp docker-compose /usr/local/sbin/docker-compose
@@ -84,10 +77,13 @@ else
fi
# Check for offline images
if [ -f ${CURRENT_DIR}/image.tar.gz ]; then
logger info "Found offline image [image.tar.gz]"
cat image.tar.gz | gzip -d | docker load
if [ -f ${CURRENT_DIR}/zabbix_image.tar.gz ]; then
logger info "Found offline image [zabbix_image.tar.gz]"
cat zabbix_image.tar.gz | gzip -d | docker load
logger success "Image restore complete"
fi
logger info "Starting service deployment ... [xx.yml]"
logger info "Starting service deployment ... [zabbix.yml]"
docker compose -f zabbix.yml up -d
logger success "Service deployment complete"
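For completeness, a hedged sketch of producing the artifacts this script expects; the exact image list depends on zabbix.yml and is assumed here:

```bash
# Offline images (adjust the list to match zabbix.yml)
docker save mysql:8.0 zabbix/zabbix-server-mysql zabbix/zabbix-web-nginx-mysql \
  | gzip > zabbix_image.tar.gz
# Offline docker bundle: installer script plus compose binary
tar zcf docker_offline.tar.gz docker-install.sh docker-compose
```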

View File

@@ -76,6 +76,7 @@ sleep 3
echo "mysql -h127.0.0.1 -uroot -p$rootPassword" >mysql.txt
echo "mysql -h127.0.0.1 -uzabbix -p$zbxPassword" >>mysql.txt
echo "http://zabbix 账号: Admin / zabbix"
## sql添加远程账号
# CREATE USER 'admin'@'%' ;
# GRANT ALL ON *.* TO 'admin'@'%' IDENTIFIED WITH mysql_native_password BY 'adminPwd123';

View File

@@ -0,0 +1,77 @@
'''
Author: Logan.Li
Gitee: https://gitee.com/attacker
email: admin@attacker.club
Date: 2025-07-07 22:13:21
LastEditTime: 2025-07-07 22:13:25
Description:
'''
import requests
import time
import logging
from threading import Lock
class ConsulCleaner:
def __init__(self, consul_url="http://consul.xx.me"):
self.consul_url = consul_url.rstrip('/')
self.lock = Lock()
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
self.logger = logging.getLogger(__name__)
def clean_failed_instances(self, max_retries=3):
"""Clean up failed (critical) instances from Consul"""
time.sleep(3) # Initial delay
for attempt in range(max_retries):
try:
with self.lock:
# Get critical services
health_url = f"{self.consul_url}/v1/health/state/critical"
response = requests.get(health_url, timeout=10)
if response.status_code != 200:
self.logger.error(f"Consul API returned {response.status_code}: {response.text}")
return False
instances = response.json()
if not instances:
self.logger.info("No critical instances found")
return True
# Deregister each critical service
for instance in instances:
if instance.get('Status') == 'critical':
service_id = instance.get('ServiceID')
if not service_id:
continue
dereg_url = f"{self.consul_url}/v1/agent/service/deregister/{service_id}"
dereg_response = requests.put(dereg_url, timeout=5)
if dereg_response.status_code == 200:
self.logger.info(f"Successfully deregistered failed instance: {service_id}")
else:
self.logger.warning(
f"Failed to deregister {service_id}. Status: {dereg_response.status_code}"
)
return True
except requests.exceptions.RequestException as e:
self.logger.error(f"Attempt {attempt + 1} failed: {str(e)}")
if attempt < max_retries - 1:
time.sleep(2 ** attempt) # Exponential backoff
continue
return False
if __name__ == "__main__":
cleaner = ConsulCleaner()
if cleaner.clean_failed_instances():
print("Cleanup completed successfully")
else:
print("Cleanup encountered errors")

View File

@@ -0,0 +1,111 @@
import os
import sys
import time

import urllib3
import requests

# Suppress InsecureRequestWarning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class ConsulNodeRegistration:
def __init__(self, ip_file_path="ip.txt", consul_url="http://consul.opsx.top"):
self.ip_file_path = ip_file_path
self.consul_url = consul_url
        # Verify the IP file exists
        if not os.path.isfile(self.ip_file_path):
            print(f"Error: IP file '{self.ip_file_path}' does not exist")
            sys.exit(1)
        print(f"Initialized; reading IP addresses from {self.ip_file_path}")
    def read_ip_file(self):
        """Read the list of IP addresses from the file"""
        try:
            with open(self.ip_file_path, 'r') as f:
                # Read every line and strip whitespace
                ip_list = [line.strip() for line in f.readlines() if line.strip()]
            return ip_list
        except Exception as e:
            print(f"Error reading IP file: {str(e)}")
            return []
    def register_node_exporters(self):
        """Read IPs from the file and register each one with Consul"""
        # Read the IP list
        ip_list = self.read_ip_file()
        if not ip_list:
            print("No usable IP addresses found")
            return
        print(f"Found {len(ip_list)} IP addresses; registering with Consul")
        for index, ip in enumerate(ip_list):
            try:
                # Generate a unique node name
                node_name = f"node-{index+1}"
                print(f"Registering: IP: {ip} \t node: {node_name}")
                # Service registration payload
                data = {
                    "id": f"{node_name}-exporter",
                    "name": "node-exporter",
                    "address": ip,
                    "port": 9100,
                    "checks": [{
                        "http": f"http://{ip}:9100/metrics",
                        "interval": "5s"
                    }]
                }
                # Send a PUT request to register the service
                response = requests.put(f"{self.consul_url}/v1/agent/service/register", json=data)
                # Check the response status
                if response.status_code == 200:
                    print(f"Service {node_name} registered successfully.")
                else:
                    print(f"Failed to register service {node_name}. Status code: {response.status_code}")
                    print(response.text)
            except Exception as e:
                print(f"Error registering IP {ip}: {str(e)}")
    def clean_failed_instances(self):
        """Deregister failed service instances"""
        time.sleep(3)
        try:
            response = requests.get(f"{self.consul_url}/v1/health/state/critical")
            if response.status_code == 200:
                instances = response.json()
                for instance in instances:
                    if instance['Status'] == 'critical':  # instance is in critical state
                        service_id = instance['ServiceID']
                        requests.put(f"{self.consul_url}/v1/agent/service/deregister/{service_id}")
                        print(f"Deregistered failed instance ID: {service_id}")
            else:
                print(f"Could not fetch data from the Consul API. Status code: {response.status_code}")
        except Exception as e:
            print(f"Error cleaning up failed instances: {str(e)}")
if __name__ == "__main__":
    # Read IPs from ip.txt in the current directory by default;
    # a different file path can be passed as the first CLI argument
    ip_file = "ip.txt"
    if len(sys.argv) > 1:
        ip_file = sys.argv[1]
    consul = ConsulNodeRegistration(ip_file_path=ip_file)
    consul.register_node_exporters()
    consul.clean_failed_instances()
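Hedged usage sketch (the script filename is assumed; the Consul URL is the constructor's default):

```bash
python3 consul_register.py              # reads ./ip.txt
python3 consul_register.py hosts.txt    # or an explicit file
curl -s http://consul.opsx.top/v1/agent/services | python3 -m json.tool
```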

View File

@@ -0,0 +1,2 @@
192.168.1.1
192.168.1.2

View File

@@ -148,7 +148,7 @@ class KubernetesAPI:
if __name__ == "__main__":
consul_url = "http://172.16.5.37:8500"
consul_url = "https://consul.yace.me"
token = "xxxxxx"
apiServer = "https://46CA01C54B919FA35648DF454239A740.gr7.ap-northeast-1.eks.amazonaws.com"
k8s = KubernetesAPI(token=token,apiServer=apiServer,consul=consul_url)

kubernets_api/consul-v2.py Normal file
View File

@@ -0,0 +1,222 @@
import os
import sys
import time
import json
import hashlib
from concurrent.futures import ThreadPoolExecutor, as_completed
import urllib3
import requests
from kubernetes import client, config
from pathlib import Path
# Suppress InsecureRequestWarning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class KubernetesMonitor:
    def __init__(self, consul_url):
        self.consul_url = consul_url
        self.cache_dir = Path.home() / ".k8s_monitor_cache"
        self.cache_dir.mkdir(exist_ok=True)
        # Load kubeconfig
        self._init_kubernetes()
        # Initialize the worker thread pool
        self.executor = ThreadPoolExecutor(max_workers=10)
    def _init_kubernetes(self):
        """Initialize the Kubernetes client"""
        kubeconfig_path = os.path.expanduser("~/.kube/config")
        if not os.path.isfile(kubeconfig_path):
            sys.exit(f"Error: kubeconfig not found at {kubeconfig_path}")
        try:
            config.load_kube_config()
            self.core_api = client.CoreV1Api()
            print("Kubernetes API connected")
        except Exception as e:
            sys.exit(f"Kubernetes connection failed: {str(e)}")
    def _get_service_hash(self, service):
        """Generate a unique hash identifying a service"""
        return hashlib.md5(f"{service.metadata.namespace}/{service.metadata.name}".encode()).hexdigest()

    def _load_cache(self, cache_key):
        """Load a cached entry"""
        cache_file = self.cache_dir / f"{cache_key}.json"
        if cache_file.exists():
            with open(cache_file) as f:
                return json.load(f)
        return None

    def _save_cache(self, cache_key, data, ttl=300):
        """Save a cache entry (default TTL: 5 minutes)"""
        cache_file = self.cache_dir / f"{cache_key}.json"
        with open(cache_file, 'w') as f:
            json.dump({
                "expire": time.time() + ttl,
                "data": data
            }, f)

    def _is_cache_valid(self, cache_key):
        """Check whether a cache entry is still valid"""
        cache_file = self.cache_dir / f"{cache_key}.json"
        if not cache_file.exists():
            return False
        try:
            with open(cache_file) as f:
                cache_data = json.load(f)
            return cache_data["expire"] > time.time()
        except Exception:
            return False
    def _batch_register_services(self, services):
        """Register a batch of services with Consul"""
        if not services:
            return
        # The Consul agent API registers one service definition per call,
        # so submit the batch one service at a time
        registered = 0
        for svc in services:
            try:
                response = requests.put(
                    f"{self.consul_url}/v1/agent/service/register",
                    json=svc,
                    timeout=10
                )
                if response.status_code == 200:
                    registered += 1
                else:
                    print(f"× Registration failed [{response.status_code}]: {response.text.strip()}")
            except Exception as e:
                print(f"! Consul registration error: {str(e)}")
        print(f"✓ Registered {registered}/{len(services)} services")
    def _process_service(self, service):
        """Process a single service (invoked in parallel)"""
        svc_hash = self._get_service_hash(service)
        # Skip when the cached entry is still valid
        if self._is_cache_valid(svc_hash):
            return None
        # Basic service information
        svc_name = service.metadata.name
        svc_namespace = service.metadata.namespace
        svc_port = service.spec.ports[0].port
        svc_cluster_ip = service.spec.cluster_ip
        prometheus_url = f"http://{svc_cluster_ip}:{svc_port}/actuator/prometheus"
        # Validate the Prometheus endpoint (with caching)
        cache_key = f"prom_check_{svc_hash}"
        if self._is_cache_valid(cache_key):
            is_valid = self._load_cache(cache_key)["data"]
        else:
            is_valid = self._check_prometheus_endpoint(prometheus_url)
            self._save_cache(cache_key, is_valid, ttl=300)  # 5-minute cache
        if not is_valid:
            return None
        # Build the registration payload
        service_data = {
            "id": f"app-{svc_namespace}-{svc_name}",
            "name": "application",
            "address": svc_cluster_ip,
            "port": svc_port,
            "checks": [{
                "http": prometheus_url,
                "interval": "5s"
            }]
        }
        # Cache the service entry
        self._save_cache(svc_hash, service_data)
        return service_data
    def _check_prometheus_endpoint(self, url):
        """Endpoint check with a timeout"""
        try:
            response = requests.get(url, timeout=3)
            return response.status_code == 200 and "system_cpu_usage" in response.text
        except Exception:
            return False
    def update_services(self):
        """Incrementally update services"""
        try:
            # Fetch every service currently in the cluster
            current_services = self.core_api.list_service_for_all_namespaces().items
            # Process services in parallel
            futures = []
            for service in current_services:
                futures.append(self.executor.submit(self._process_service, service))
            # Collect the results
            batch_services = []
            for future in as_completed(futures):
                result = future.result()
                if result:
                    batch_services.append(result)
            # Register the batch
            if batch_services:
                self._batch_register_services(batch_services)
                print(f"Processed {len(batch_services)} services in this pass")
        except Exception as e:
            print(f"Service update error: {str(e)}")
    def clean_failed_instances(self):
        """Clean up failed instances"""
        try:
            # Fetch all failed instances
            response = requests.get(
                f"{self.consul_url}/v1/health/state/critical",
                timeout=5
            )
            if response.status_code != 200:
                return
            critical_services = response.json()
            if not critical_services:
                return
            # Deregister in parallel
            futures = []
            for instance in critical_services:
                futures.append(
                    self.executor.submit(
                        requests.put,
                        f"{self.consul_url}/v1/agent/service/deregister/{instance['ServiceID']}",
                        timeout=3
                    )
                )
            # Wait for completion
            success_count = 0
            for future in as_completed(futures):
                try:
                    response = future.result()
                    if response.status_code == 200:
                        success_count += 1
                except Exception:
                    pass
            print(f"Cleanup done; removed {success_count}/{len(critical_services)} failed instances")
        except Exception as e:
            print(f"Cleanup error: {str(e)}")
if __name__ == "__main__":
    consul_endpoint = "http://172.16.5.37:8500"  # replace with your actual Consul address
    monitor = KubernetesMonitor(consul_endpoint)
    try:
        # Incrementally update services
        monitor.update_services()
        # Clean up failed instances
        monitor.clean_failed_instances()
    except KeyboardInterrupt:
        print("\nAborted")
        sys.exit(0)

View File

@@ -0,0 +1,237 @@
#!/usr/bin/env python3
"""
Author: Logan.Li
Gitee: https://gitee.com/attacker
email: admin@attacker.club
Date: 2025-01-05 12:25:52
LastEditTime: 2025-04-09
Description: Export Kubernetes Deployment+Service and Ingress resources
"""
import os
import yaml
import subprocess
import sys
# Try to import the kubernetes module; install it on failure
try:
    from kubernetes import client, config
    import urllib3
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
    print("kubernetes module not installed; attempting installation...")
    subprocess.check_call([sys.executable, "-m", "pip", "install", "kubernetes"])
    from kubernetes import client, config
    import urllib3
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def sanitize_resource(obj, resource_type):
    """Clean a resource object by dropping unnecessary fields"""
    # Serialize the object
    obj_yaml = api_client.sanitize_for_serialization(obj)
    # Make sure the correct apiVersion and kind are present
    if isinstance(obj, client.V1Service):
        obj_yaml['apiVersion'] = 'v1'
        obj_yaml['kind'] = 'Service'
    elif isinstance(obj, client.V1Deployment):
        obj_yaml['apiVersion'] = 'apps/v1'
        obj_yaml['kind'] = 'Deployment'
    elif isinstance(obj, client.V1Ingress):
        obj_yaml['apiVersion'] = 'networking.k8s.io/v1'
        obj_yaml['kind'] = 'Ingress'
    # Reorder fields so apiVersion and kind come first
    ordered_obj = {
        'apiVersion': obj_yaml.pop('apiVersion'),
        'kind': obj_yaml.pop('kind'),
        'metadata': obj_yaml.pop('metadata', {}),
        'spec': obj_yaml.pop('spec', {})
    }
    # Drop unnecessary metadata fields
    metadata_fields = ['selfLink', 'generation', 'creationTimestamp',
                       'resourceVersion', 'uid', 'managedFields']
    for field in metadata_fields:
        ordered_obj['metadata'].pop(field, None)
    # Clean annotations
    annotations_to_remove = [
        'deployment.kubernetes.io/revision',
        'kubectl.kubernetes.io/restartedAt',
        'kubectl.kubernetes.io/last-applied-configuration'
    ]
    if 'annotations' in ordered_obj['metadata']:
        for ann in annotations_to_remove:
            ordered_obj['metadata']['annotations'].pop(ann, None)
        # Drop the annotations field entirely if it is now empty
        if not ordered_obj['metadata']['annotations']:
            del ordered_obj['metadata']['annotations']
    # Drop unnecessary spec fields
    if resource_type == 'Service':
        for key in ['clusterIPs', 'clusterIP', 'ipFamilies',
                    'internalTrafficPolicy', 'ipFamilyPolicy']:
            ordered_obj['spec'].pop(key, None)
    elif resource_type == 'Deployment':
        if 'strategy' in ordered_obj['spec']:
            # Keep strategy but trim fields that are not needed
            strategy = ordered_obj['spec']['strategy']
            if 'rollingUpdate' in strategy:
                rolling_update = strategy['rollingUpdate']
                rolling_update.pop('maxUnavailable', None)
                rolling_update.pop('maxSurge', None)
                # Drop rollingUpdate entirely if it is now empty
                if not rolling_update:
                    del strategy['rollingUpdate']
    # Drop the status field and other leftovers
    if 'status' in obj_yaml:
        del obj_yaml['status']
    # Carry over any remaining fields that may still be needed
    for key, value in obj_yaml.items():
        if key not in ['apiVersion', 'kind', 'metadata', 'spec'] and value is not None:
            ordered_obj[key] = value
    return ordered_obj
def export_resources():
    """Export Kubernetes resources into separate directories"""
    # Load kubeconfig
    config.load_kube_config()
    global api_client
    api_client = client.ApiClient()
    # Build API clients
    apps_api = client.AppsV1Api(api_client)
    core_api = client.CoreV1Api(api_client)
    networking_api = client.NetworkingV1Api(api_client)
    # Fetch all resources
    all_deployments = apps_api.list_deployment_for_all_namespaces().items
    all_services = core_api.list_service_for_all_namespaces().items
    all_ingresses = networking_api.list_ingress_for_all_namespaces().items
    # Create output directories
    deployments_dir = os.path.join(os.getcwd(), 'deployments')
    ingress_dir = os.path.join(os.getcwd(), 'ingress')
    os.makedirs(deployments_dir, exist_ok=True)
    os.makedirs(ingress_dir, exist_ok=True)
    # Organize resources by namespace
    namespace_resources = {}
    # Handle Deployments and Services
    for dep in all_deployments:
        namespace = dep.metadata.namespace
        dep_name = dep.metadata.name
        if namespace not in namespace_resources:
            namespace_resources[namespace] = {'deployments': {}, 'services': {}, 'ingresses': {}}
        # Clean and record the Deployment
        cleaned_dep = sanitize_resource(dep, 'Deployment')
        namespace_resources[namespace]['deployments'][dep_name] = cleaned_dep
        # Find matching Services
        pod_labels = dep.spec.template.metadata.labels
        matched_svcs = [
            svc for svc in all_services
            if svc.metadata.namespace == namespace
            and svc.spec.selector
            and all(pod_labels.get(k) == v for k, v in svc.spec.selector.items())
        ]
        # Clean and record the matching Services
        for svc in matched_svcs:
            cleaned_svc = sanitize_resource(svc, 'Service')
            namespace_resources[namespace]['services'][svc.metadata.name] = cleaned_svc
    # Handle Ingresses
    for ingress in all_ingresses:
        namespace = ingress.metadata.namespace
        if namespace not in namespace_resources:
            namespace_resources[namespace] = {'deployments': {}, 'services': {}, 'ingresses': {}}
        # Clean and record the Ingress
        cleaned_ingress = sanitize_resource(ingress, 'Ingress')
        namespace_resources[namespace]['ingresses'][ingress.metadata.name] = cleaned_ingress
    # Export Deployment+Service pairs into the deployments directory
    for namespace, resources in namespace_resources.items():
        # Create a per-namespace subdirectory under deployments
        ns_deployments_dir = os.path.join(deployments_dir, namespace)
        os.makedirs(ns_deployments_dir, exist_ok=True)
        # Export each Deployment together with its related Services
        for dep_name, deployment in resources['deployments'].items():
            # Find Services related to this Deployment
            related_services = []
            for svc_name, service in resources['services'].items():
                # Simple match: the Service name contains the Deployment name
                if dep_name in svc_name:
                    related_services.append(service)
            # Combine the Deployment with its Services
            combined_resources = [deployment]
            combined_resources.extend(related_services)
            # Build the output filename
            filename = f"{dep_name}.yaml"
            filepath = os.path.join(ns_deployments_dir, filename)
            # Write the file, ensuring valid multi-document YAML
            with open(filepath, 'w') as f:
                yaml.dump_all(combined_resources, f, default_flow_style=False, sort_keys=False, explicit_start=True)
            print(f"Exported {len(combined_resources)} resources to {filepath}")
        # Export standalone Services with no associated Deployment
        exported_services = set()
        for deployment_name in resources['deployments']:
            for svc_name in resources['services']:
                if deployment_name in svc_name:
                    exported_services.add(svc_name)
        for svc_name, service in resources['services'].items():
            if svc_name not in exported_services:
                filename = f"{svc_name}.yaml"
                filepath = os.path.join(ns_deployments_dir, filename)
                with open(filepath, 'w') as f:
                    yaml.dump(service, f, default_flow_style=False, sort_keys=False)
                print(f"Exported standalone Service to {filepath}")
    # Export Ingresses into the ingress directory
    for namespace, resources in namespace_resources.items():
        if resources['ingresses']:
            # Create a per-namespace subdirectory under ingress
            ns_ingress_dir = os.path.join(ingress_dir, namespace)
            os.makedirs(ns_ingress_dir, exist_ok=True)
            # Export every Ingress
            for ing_name, ingress in resources['ingresses'].items():
                filename = f"{ing_name}.yaml"
                filepath = os.path.join(ns_ingress_dir, filename)
                with open(filepath, 'w') as f:
                    yaml.dump(ingress, f, default_flow_style=False, sort_keys=False)
                print(f"Exported Ingress to {filepath}")

if __name__ == "__main__":
    export_resources()
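Hedged usage sketch (the script filename is assumed); the script writes per-namespace files under ./deployments and ./ingress, which can be dry-run re-applied:

```bash
python3 export_k8s_resources.py
ls deployments/ ingress/
kubectl apply -f deployments/<namespace>/<app>.yaml --dry-run=client
```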

View File

@@ -0,0 +1,89 @@
'''
Author: Logan.Li
Gitee: https://gitee.com/attacker
email: admin@attacker.club
Date: 2025-01-05 12:25:52
LastEditTime: 2025-03-03 00:06:37
Description:
pip install kubernetes
'''
from kubernetes import client, config
import os
import yaml
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
config.load_kube_config()
api_client = client.ApiClient()
def sanitize(obj):
api_version = getattr(obj, 'api_version', None)
kind = getattr(obj, 'kind', None)
obj_yaml = api_client.sanitize_for_serialization(obj)
obj_yaml['apiVersion'] = api_version or ('v1' if isinstance(obj, client.V1Service) else 'apps/v1')
obj_yaml['kind'] = kind or ('Service' if isinstance(obj, client.V1Service) else 'Deployment')
if isinstance(obj, client.V1Service):
if 'clusterIPs' in obj_yaml['spec']:
del obj_yaml['spec']['clusterIPs']
if 'clusterIP' in obj_yaml['spec']:
del obj_yaml['spec']['clusterIP']
metadata_fields = ['selfLink', 'generation', 'creationTimestamp', 'resourceVersion', 'uid', 'managedFields']
for field in metadata_fields:
obj_yaml['metadata'].pop(field, None)
obj_yaml['metadata'].setdefault('annotations', {})
obj_yaml['metadata'].setdefault('labels', {})
annotations_to_remove = [
'deployment.kubernetes.io/revision',
'kubectl.kubernetes.io/restartedAt',
'kubectl.kubernetes.io/last-applied-configuration'
]
for ann in annotations_to_remove:
obj_yaml['metadata']['annotations'].pop(ann, None)
if 'strategy' in obj_yaml.get('spec', {}):
del obj_yaml['spec']['strategy']
return obj_yaml
def merge_and_export(deployment, services, namespace):
combined = [sanitize(deployment)]
combined.extend([sanitize(svc) for svc in services])
dir_path = os.path.join(os.getcwd(), namespace)
os.makedirs(dir_path, exist_ok=True)
filename = f"{deployment.metadata.name}-combined.yaml"
with open(os.path.join(dir_path, filename), 'w') as f:
yaml.dump_all(combined, f, default_flow_style=False)
print(f"Exported {len(services)} services with deployment to {filename}")
def main():
apps_api = client.AppsV1Api(api_client)
core_api = client.CoreV1Api(api_client)
all_deployments = apps_api.list_deployment_for_all_namespaces().items
all_services = core_api.list_service_for_all_namespaces().items
for dep in all_deployments:
namespace = dep.metadata.namespace
pod_labels = dep.spec.template.metadata.labels
matched_svcs = [
svc for svc in all_services
if svc.metadata.namespace == namespace
and svc.spec.selector
and all(pod_labels.get(k) == v for k, v in svc.spec.selector.items())
]
if matched_svcs:
merge_and_export(dep, matched_svcs, namespace)
if __name__ == "__main__":
main()

View File

@@ -1,5 +1,16 @@
'''
Author: Logan.Li
Gitee: https://gitee.com/attacker
email: admin@attacker.club
Date: 2025-01-05 12:25:52
LastEditTime: 2025-01-05 15:28:03
Description:
pip install kubernetes
'''
'''
Export deployment + svc configuration
pip install kubernetes
'''
from kubernetes import client, config
import os
@@ -54,6 +65,7 @@ def sanitize(obj):
obj_yaml['metadata'].setdefault('annotations', {})
obj_yaml['metadata'].setdefault('labels', {})
    # Remove unneeded annotations
    if 'deployment.kubernetes.io/revision' in obj_yaml['metadata']['annotations']:
        del obj_yaml['metadata']['annotations']['deployment.kubernetes.io/revision']

View File

@@ -0,0 +1,79 @@
'''
Author: Logan.Li
Gitee: https://gitee.com/attacker
email: admin@attacker.club
Date: 2025-01-05 12:25:52
LastEditTime: 2025-01-05 15:28:03
Description:
pip install kubernetes
'''
'''
Export ingress configuration
'''
from kubernetes import client, config
import os
import yaml
# Suppress insecure-request warnings
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Create the API client
config.load_kube_config()
api_client = client.ApiClient()
def sanitize(obj):
    # Read apiVersion and kind from the original object
    api_version = getattr(obj, 'api_version', None)
    kind = getattr(obj, 'kind', None)
    obj_yaml = api_client.sanitize_for_serialization(obj)
    # Fall back to defaults when apiVersion or kind is empty
    obj_yaml['apiVersion'] = api_version or 'networking.k8s.io/v1'  # for Ingress
    obj_yaml['kind'] = kind or 'Ingress'
    # Drop unneeded fields
    if 'selfLink' in obj_yaml['metadata']:
        del obj_yaml['metadata']['selfLink']
    if 'generation' in obj_yaml['metadata']:
        del obj_yaml['metadata']['generation']
    del obj_yaml['metadata']['creationTimestamp']
    del obj_yaml['metadata']['resourceVersion']
    del obj_yaml['metadata']['uid']
    del obj_yaml['status']
    if 'managedFields' in obj_yaml['metadata']:
        del obj_yaml['metadata']['managedFields']
    # Make sure the annotations and labels fields exist
    obj_yaml['metadata'].setdefault('annotations', {})
    obj_yaml['metadata'].setdefault('labels', {})
    # Remove unneeded annotations
    if 'kubectl.kubernetes.io/last-applied-configuration' in obj_yaml['metadata']['annotations']:
        del obj_yaml['metadata']['annotations']['kubectl.kubernetes.io/last-applied-configuration']
    return obj_yaml
def export_ingress(ingress, namespace):
    ingress_yaml = sanitize(ingress)
    # Generate the YAML file
    yaml_file = f"{ingress.metadata.name}-ingress.yaml"
    dir_path = os.path.join(os.getcwd(), namespace)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)  # makedirs creates directories recursively
    with open(os.path.join(dir_path, yaml_file), 'w') as f:
        yaml.dump(ingress_yaml, f, default_flow_style=False)
    print(f"{yaml_file} created in {namespace} directory.")

# Fetch all Ingresses
ingresses = client.NetworkingV1Api(api_client).list_ingress_for_all_namespaces(watch=False)
# Iterate over every Ingress resource and export it
for ingress in ingresses.items:
    export_ingress(ingress, ingress.metadata.namespace)