[+] Add high-availability cluster deployment docs

Martin 2025-01-26 23:06:54 +08:00
parent 1cb80efce5
commit 7b41648aa2


@@ -13,7 +13,7 @@ export pod_subnet=10.101.0.0/16
export service_subnet=10.100.0.0/16
curl -sSL https://git.martin98.com/MartinFarm/init/raw/branch/main/init-k8s.sh | bash
```
### Initialize the master node
```bash
cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
@@ -33,7 +33,7 @@ nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: $(hostname)
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
@@ -78,8 +78,152 @@ export KUBECONFIG=/etc/kubernetes/admin.conf
# Install the Tigera operator
kubectl create -f https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/tigera-operator.yaml
kubectl get pods -n tigera-operator
kubectl wait --for=condition=Ready pods --all -n tigera-operator --timeout=300s
# Install Calico
curl https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/custom-resources.yaml -O
sed -i "s|\(cidr: \).*|\1$pod_subnet|" custom-resources.yaml
kubectl create -f custom-resources.yaml
kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
kubectl wait --for=condition=Ready pods --all -n calico-apiserver --timeout=300s
```
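A quick sanity check after the single-master bootstrap can save debugging later. A minimal sketch using only standard kubectl commands, assuming the KUBECONFIG exported above:
```bash
# The node should report Ready once Calico is up
kubectl get nodes -o wide
# CoreDNS, kube-proxy and the Calico pods should all be Running
kubectl get pods -A -o wide
# Optionally block until the node is Ready before scheduling workloads
kubectl wait --for=condition=Ready nodes --all --timeout=300s
```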
### Initialize a high-availability cluster
#### All masters
```bash
# Configure the HA VIP (keepalived + haproxy)
apt install -y keepalived haproxy
export vip_ip=10.1.3.100
export vip=$vip_ip/16
export masters=10.1.3.101,10.1.3.102,10.1.3.103
current_ip=$(hostname -I | awk '{print $1}')
first_master=$(echo $masters | cut -d',' -f1)
# The first master starts as the VRRP MASTER with a higher priority; the others start as BACKUP
if [ "$current_ip" == "$first_master" ]; then
  state=MASTER
  priority=150
else
  state=BACKUP
  priority=100
fi
cat <<EOF | sudo tee /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
    state $state
    # adjust if the primary NIC is not eth0
    interface eth0
    virtual_router_id 51
    priority $priority
    advert_int 1
    virtual_ipaddress {
        $vip
    }
}
EOF
IFS=',' read -r -a master_ips <<< "$masters"
backend_config=""
for ((i=0; i<${#master_ips[@]}; i++)); do
  backend_config+="    server master-$((i+1)) ${master_ips[$i]}:6443 check"$'\n'
done
cat <<EOF | sudo tee /etc/haproxy/haproxy.cfg
global
    log 127.0.0.1 local0 notice
    maxconn 10000
    # chroot into the directory created by the haproxy package
    chroot /var/lib/haproxy
    user haproxy
    group haproxy
    daemon
    # SSL tuning
    tune.ssl.default-dh-param 2048
    tune.bufsize 32768

# Default settings
defaults
    log global
    option httplog
    option dontlognull
    timeout connect 5000
    timeout client 50000
    timeout server 50000

frontend k8s-api
    # 8443 avoids clashing with the local kube-apiserver, which binds 6443 on every master
    bind *:8443
    mode tcp
    option tcplog
    default_backend k8s-api-backend

backend k8s-api-backend
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
$backend_config
EOF
groupadd -r haproxy || echo "group haproxy already exists"
useradd -r -g haproxy -s /sbin/nologin haproxy || echo "user haproxy already exists"
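# Optional sanity check (sketch): validate the generated config before restarting,
# so a typo does not take the API entry point down on every master at once
haproxy -c -f /etc/haproxy/haproxy.cfg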
sudo systemctl restart keepalived && sudo systemctl restart haproxy
```
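Before initializing the cluster it is worth confirming that the VIP and the load-balancer layer actually came up. A minimal check, assuming the eth0 interface and the 8443 frontend port used above:
```bash
# The VIP should be bound on exactly one master (the current keepalived MASTER)
ip -4 addr show dev eth0 | grep "$vip_ip" || echo "VIP not on this node"
# haproxy should be listening on the frontend port on every master
ss -lntp | grep -w 8443
systemctl is-active keepalived haproxy
```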
#### Master-01 (first control-plane node)
```bash
# Write the kubeadm configuration
cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: $(openssl rand -hex 3).$(openssl rand -hex 8)
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: $(hostname -I | awk '{print $1}')
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: $(hostname)
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# API traffic enters through the haproxy frontend on the VIP (port 8443, see above)
controlPlaneEndpoint: "$vip_ip:8443"
controllerManager: {}
dns:
  imageRepository: $mirrors/coredns
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: $mirrors
kind: ClusterConfiguration
kubernetesVersion: $k8s_version
networking:
  dnsDomain: cluster.local
  podSubnet: $pod_subnet
  serviceSubnet: $service_subnet
proxy: {}
scheduler: {}
EOF
# Run the installation
kubeadm init --config=kubeadm-config.yaml --upload-certs --v=9
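# Because of --upload-certs, kubeadm init prints a join command for additional control-plane
# nodes. Run it on the remaining masters; the values below are placeholders from that output,
# shown only as a sketch:
#   kubeadm join $vip_ip:8443 --token <token> \
#     --discovery-token-ca-cert-hash sha256:<hash> \
#     --control-plane --certificate-key <certificate-key>
# If the output scrolls away, it can be regenerated later:
#   kubeadm token create --print-join-command
#   kubeadm init phase upload-certs --upload-certs   # prints a fresh certificate key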
# Configure kubectl access
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
# Install the Tigera operator
kubectl create -f https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/tigera-operator.yaml
kubectl wait --for=condition=Ready pods --all -n tigera-operator --timeout=300s
# Install Calico
curl https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/custom-resources.yaml -O