Initialize the environment on every node

Configure k8s properties

# k8s / containerd component versions
export K8S_VERSION=1.32
export CONTAINERD_VERSION=2.0.2
export CALICO_VERSION=3.29.1
# image mirror and full k8s version
export mirrors=docker.martin98.com/k8s
export k8s_version=1.32.1
# pod / service network CIDRs
export pod_subnet=10.101.0.0/16
export service_subnet=10.100.0.0/16
curl -sSL https://git.martin98.com/MartinFarm/init/raw/branch/main/init-k8s.sh | bash
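
After the init script finishes, a quick sanity check on each node confirms the container runtime and kubeadm tooling are in place (a minimal sketch; the exact versions depend on what init-k8s.sh installs):

# runtime and tooling installed by the init script
systemctl is-active containerd
containerd --version
crictl --version
kubeadm version -o short
kubelet --version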

Initialize a single-master node

cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: $(openssl rand -hex 3).$(openssl rand -hex 8)
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: $(hostname -I | awk '{print $1}')
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: $(hostname)
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: 
  imageRepository: $mirrors/coredns
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: $mirrors
kind: ClusterConfiguration
kubernetesVersion: $k8s_version
networking:
  dnsDomain: cluster.local
  podSubnet: $pod_subnet
  serviceSubnet: $service_subnet
proxy: {}
scheduler: {}
EOF
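
Before running the install, the generated file can be checked with kubeadm's built-in validator (available in recent kubeadm releases), which catches YAML and field errors early:

# validate the generated kubeadm configuration
kubeadm config validate --config kubeadm-config.yaml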

# Run the installation
kubeadm init --config=kubeadm-config.yaml --upload-certs --v=9
# Set up kubectl access
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
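
Before installing the CNI it is worth confirming the API server answers; the node stays NotReady until a network plugin is in place, which is expected at this point:

# the control-plane node reports NotReady until a CNI is installed
kubectl cluster-info
kubectl get nodes
kubectl get pods -n kube-system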

# Install the Tigera operator
kubectl create -f https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/tigera-operator.yaml
kubectl wait --for=condition=Ready pods --all -n tigera-operator --timeout=300s
# Install Calico
curl https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/custom-resources.yaml -O
sed -i "s|\(cidr: \).*|\1$pod_subnet|" custom-resources.yaml
kubectl create -f custom-resources.yaml
kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
kubectl wait --for=condition=Ready pods --all -n calico-apiserver --timeout=300s
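
Once the Calico pods are Ready, verify the cluster; on a single-node cluster the control-plane taint normally has to be removed before ordinary workloads can schedule (optional, only if this node should also run workloads):

kubectl get nodes -o wide
kubectl get pods -A
# optional: allow workloads on the single control-plane node
kubectl taint nodes --all node-role.kubernetes.io/control-plane-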

Initialize a high-availability cluster

All master nodes

# Configure the high-availability VIP (keepalived + haproxy)
apt install -y keepalived haproxy
export vip_ip=10.1.3.100
export vip=$vip_ip/16
export masters=10.1.3.101,10.1.3.102,10.1.3.103
current_ip=$(hostname -I | awk '{print $1}')
first_master=$(echo $masters | cut -d',' -f1)
if [ "$current_ip" == "$first_master" ]; then
    state=MASTER
else
    state=BACKUP
fi
cat <<EOF | sudo tee /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
    state $state
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    virtual_ipaddress {
        $vip
    }
}
EOF
IFS=',' read -r -a master_ips <<< "$masters"
backend_config=""
for ((i=0; i<${#master_ips[@]}; i++)); do
    backend_config+="    server master-$((i+1)) ${master_ips[$i]}:6443 check"$'\n'
done
cat <<EOF | sudo tee /etc/haproxy/haproxy.cfg
global
    log 127.0.0.1 local0 notice
    maxconn 10000
    chroot /var/lib/haproxy
    user haproxy
    group haproxy
    daemon
    # ssl tuning
    tune.ssl.default-dh-param 2048
    tune.bufsize 32768

# default settings
defaults
    log global
    option  httplog
    option  dontlognull
    timeout connect 5000
    timeout client 50000
    timeout server 50000

# listen on 16443 so haproxy does not collide with the local kube-apiserver on 6443
frontend k8s-api
    bind *:16443
    mode tcp
    option tcplog
    default_backend k8s-api-backend

backend k8s-api-backend
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
$backend_config
EOF
groupadd -r haproxy || echo "group already exists"
useradd -r -g haproxy -s /sbin/nologin haproxy || echo "user already exists"
sudo systemctl restart keepalived && sudo systemctl restart haproxy
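
A quick check on each master confirms the VIP landed on exactly one node and that haproxy is listening on the frontend port (a sketch; interface and port match the config above):

# the VIP should appear on exactly one master
ip -4 addr show eth0 | grep "$vip_ip" || echo "VIP not on this node"
# haproxy frontend and both services should be up
ss -lntp | grep 16443
systemctl is-active keepalived haproxy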

Master-01

# kubeadm configuration
cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: $(openssl rand -hex 3).$(openssl rand -hex 8)
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: $(hostname -I | awk '{print $1}')
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: $(hostname)
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "$vip_ip:16443"  # haproxy frontend port on the VIP
controllerManager: {}
dns: 
  imageRepository: $mirrors/coredns
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: $mirrors
kind: ClusterConfiguration
kubernetesVersion: $k8s_version
networking:
  dnsDomain: cluster.local
  podSubnet: $pod_subnet
  serviceSubnet: $service_subnet
proxy: {}
scheduler: {}
EOF

# Run the installation
kubeadm init --config=kubeadm-config.yaml --upload-certs --v=9
# Set up kubectl access
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf

# Install the Tigera operator
kubectl create -f https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/tigera-operator.yaml
kubectl wait --for=condition=Ready pods --all -n tigera-operator --timeout=300s
# Install Calico
curl https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/custom-resources.yaml -O
sed -i "s|\(cidr: \).*|\1$pod_subnet|" custom-resources.yaml
kubectl create -f custom-resources.yaml
kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
kubectl wait --for=condition=Ready pods --all -n calico-apiserver --timeout=300s
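
Because controlPlaneEndpoint points at the VIP, admin.conf already reaches the API through haproxy; a quick check confirms that path and the Calico rollout:

# cluster-info should show the control plane at the VIP endpoint
kubectl cluster-info
kubectl get pods -n calico-system -o wide
kubectl get pods -n calico-apiserver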

Join the cluster

kubeadm token create --print-join-command
# worker node join
kubeadm join 10.1.2.200:6443 \
  --token ??? \
  --discovery-token-ca-cert-hash ???
# control-plane join (also needs the certificate key printed by --upload-certs)
kubeadm join 10.1.2.200:6443 \
  --token ??? \
  --discovery-token-ca-cert-hash ??? \
  --control-plane \
  --certificate-key ???
# verify the cluster
kubectl get nodes
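
The certificate key from kubeadm init --upload-certs expires after two hours; for later control-plane joins it can be re-uploaded and the fresh key passed via --certificate-key. After all joins, every master should be Ready and etcd should have one member per master:

# re-upload control-plane certificates and print a fresh certificate key
kubeadm init phase upload-certs --upload-certs
# one Ready node and one etcd pod per master
kubectl get nodes -o wide
kubectl get pods -n kube-system -l component=etcd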

Additional notes

# Failed image pulls can leave a node unhealthy; restarting the runtime and kubelet usually recovers it
systemctl restart kubelet
systemctl restart containerd  # or docker
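
For diagnosing pull failures, crictl and the kubelet journal are usually the fastest way in; images can also be pre-pulled from the mirror before kubeadm init (uses the mirrors and k8s_version variables defined earlier):

# inspect the runtime directly on the affected node
crictl images
crictl ps -a
journalctl -u kubelet -n 50 --no-pager
# optionally pre-pull control-plane images from the mirror before kubeadm init
kubeadm config images pull --image-repository $mirrors --kubernetes-version $k8s_version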