## Initialize the environment on every node

### Configure k8s properties

```bash
# k8s / containerd / calico versions
export K8S_VERSION=1.32
export CONTAINERD_VERSION=2.0.2
export CALICO_VERSION=3.29.1

# Image mirror and k8s version
export mirrors=docker.martin98.com/k8s
export k8s_version=1.32.1

# Network CIDRs
export pod_subnet=10.101.0.0/16
export service_subnet=10.100.0.0/16

curl -sSL https://git.martin98.com/MartinFarm/init/raw/branch/main/init-k8s.sh | bash
```

### Initialize a single-master node

```bash
cat << EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: $(openssl rand -hex 3).$(openssl rand -hex 8)
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: $(hostname -I | awk '{print $1}')
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: $(hostname)
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  imageRepository: $mirrors/coredns
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: $mirrors
kind: ClusterConfiguration
kubernetesVersion: $k8s_version
networking:
  dnsDomain: cluster.local
  podSubnet: $pod_subnet
  serviceSubnet: $service_subnet
proxy: {}
scheduler: {}
EOF

# Start the installation
kubeadm init --config=kubeadm-config.yaml --upload-certs --v=9

# Configure kubectl access
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf

# Install the Tigera operator
kubectl create -f https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/tigera-operator.yaml
kubectl wait --for=condition=Ready pods --all -n tigera-operator --timeout=300s

# Install Calico
curl https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/custom-resources.yaml -O
sed -i "s|\(cidr: \).*|\1$pod_subnet|" custom-resources.yaml
kubectl create -f custom-resources.yaml
kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
kubectl wait --for=condition=Ready pods --all -n calico-apiserver --timeout=300s
```

### Initialize a highly available cluster

#### All masters

```bash
# Configure the HA VIP
apt install -y keepalived haproxy

export vip_ip=10.1.3.100
export vip=$vip_ip/16
export masters=10.1.3.101,10.1.3.102,10.1.3.103

current_ip=$(hostname -I | awk '{print $1}')
first_master=$(echo $masters | cut -d',' -f1)
if [ "$current_ip" == "$first_master" ]; then
  state=MASTER
else
  state=BACKUP
fi
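# keepalived / haproxy configuration -- a minimal sketch, not part of the original
# steps: the variables above ($state, $vip, $masters) are computed but no config
# files are written here, so an illustrative example follows. The NIC name (eth0),
# the VRRP router id/password, and the haproxy bind port are assumptions; adjust
# them to your environment.
iface=eth0   # assumption: replace with the actual interface name, e.g. ens18

cat > /etc/keepalived/keepalived.conf << EOF
vrrp_instance VI_1 {
    state $state
    interface $iface
    virtual_router_id 51
    priority $([ "$state" = "MASTER" ] && echo 101 || echo 100)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-vip
    }
    virtual_ipaddress {
        $vip
    }
}
EOF

# Optional haproxy front end for the API servers. haproxy cannot bind :6443 on the
# masters themselves (kube-apiserver already listens there), so it binds 8443 here;
# if you use it, point controlPlaneEndpoint below at "$vip_ip:8443" instead of :6443.
cat > /etc/haproxy/haproxy.cfg << EOF
defaults
    mode tcp
    timeout connect 5s
    timeout client  30s
    timeout server  30s

frontend kube-apiserver
    bind *:8443
    default_backend kube-apiserver

backend kube-apiserver
$(echo $masters | tr ',' '\n' | awk '{printf "    server master%d %s:6443 check\n", NR, $1}')
EOF

systemctl enable keepalived haproxy
systemctl restart keepalived haproxy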

cat << EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: $(openssl rand -hex 3).$(openssl rand -hex 8)
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: $(hostname -I | awk '{print $1}')
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: $(hostname)
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "$vip_ip:6443"
controllerManager: {}
dns:
  imageRepository: $mirrors/coredns
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: $mirrors
kind: ClusterConfiguration
kubernetesVersion: $k8s_version
networking:
  dnsDomain: cluster.local
  podSubnet: $pod_subnet
  serviceSubnet: $service_subnet
proxy: {}
scheduler: {}
EOF

# Start the installation (run kubeadm init on the first master only; the remaining
# masters join afterwards with --control-plane, see "Join the cluster" below)
kubeadm init --config=kubeadm-config.yaml --upload-certs --v=9

# Configure kubectl access
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf

# Install the Tigera operator
kubectl create -f https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/tigera-operator.yaml
kubectl wait --for=condition=Ready pods --all -n tigera-operator --timeout=300s

# Install Calico
curl https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/custom-resources.yaml -O
sed -i "s|\(cidr: \).*|\1$pod_subnet|" custom-resources.yaml
kubectl create -f custom-resources.yaml
kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
kubectl wait --for=condition=Ready pods --all -n calico-apiserver --timeout=300s
```

### Join the cluster

```bash
# Print a ready-made worker join command on an existing control-plane node
# (the token and CA cert hash can also be derived by hand; see the end of this page)
kubeadm token create --print-join-command

# Join as a worker
kubeadm join 10.1.2.200:6443 \
  --token ??? \
  --discovery-token-ca-cert-hash ???

# Join as an additional control-plane node
# (also requires --certificate-key from the "kubeadm init --upload-certs" output)
kubeadm join 10.1.2.200:6443 \
  --token ??? \
  --discovery-token-ca-cert-hash ??? \
  --control-plane

# Verify the cluster
kubectl get nodes
```

### Other notes

```bash
# A failed image pull may leave the node unhealthy; restarting the runtime usually helps
systemctl restart kubelet
systemctl restart containerd # or docker
```
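A quick reference for the `???` placeholders above: on a control-plane node the bootstrap token and the CA certificate hash can be listed or re-derived with the standard commands below, followed by a basic post-join health check.

```bash
# Value for --discovery-token-ca-cert-hash (prefix the output with "sha256:")
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'

# Currently valid bootstrap tokens (for --token)
kubeadm token list

# Basic health check once all nodes have joined
kubectl get nodes -o wide
kubectl get pods -A -o wide
kubectl get --raw='/readyz?verbose'
```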