#!/bin/bash
lsb_dist=$(cat /etc/*release | grep ^ID= | cut -d= -f2)            # ubuntu or debian?
release=$(cat /etc/*release | grep VERSION_CODENAME | cut -d= -f2) # ubuntu (jammy, oracular), debian (bookworm), ...
# Defaults
K8S_RELEASE=${K8S_VERSION:-1.32.3}              # full release; defaults to 1.32.3 if K8S_VERSION is unset
K8S_VERSION=${K8S_RELEASE%.*}                   # minor series (e.g. 1.32), used for the apt repository path
CONTAINERD_VERSION=${CONTAINERD_VERSION:-2.0.2}
CALICO_VERSION=${CALICO_VERSION:-3.29.1}
MIRRORS=${MIRRORS:-docker.martin98.com/k8s}     # image mirror for the cluster; used if MIRRORS is unset
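# NOTE: the following variables are expected to be exported by the caller; they are not
# defined in this script (the values shown are only illustrative examples):
#   masters         comma-separated control-plane IPs, e.g. "10.0.0.11,10.0.0.12,10.0.0.13"
#   vip             virtual IP managed by keepalived, e.g. "10.0.0.10"
#   vip_ip          control-plane endpoint address used by kubeadm (typically the same VIP)
#   pod_subnet      pod CIDR, e.g. "10.244.0.0/16"
#   service_subnet  service CIDR, e.g. "10.96.0.0/12"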
# Refresh apt sources, then add the Docker CE and Kubernetes repositories
curl -sSL https://git.martin98.com/MartinFarm/init/raw/branch/main/init-apt.sh | bash
curl -fsSL https://mirrors.martin98.com/repository/docker-ce/linux/$lsb_dist/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://mirrors.martin98.com/repository/docker-ce/linux/$lsb_dist $release stable" > /etc/apt/sources.list.d/docker.list
mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.martin98.com/repository/kubernetes/core/stable/v$K8S_VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.martin98.com/repository/kubernetes/core/stable/v$K8S_VERSION/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
# Base packages required for Kubernetes
apt update && apt install -y curl apt-transport-https ca-certificates gnupg runc
# Kernel modules and sysctl settings for Kubernetes; disable swap
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sudo swapoff -a && sudo sed -i '/swap/d' /etc/fstab
sudo modprobe overlay && sudo modprobe br_netfilter
sudo sysctl --system
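# Optional sanity check (a minimal sketch, not part of the original flow): confirm the
# modules are loaded and IPv4 forwarding is enabled before continuing.
lsmod | grep -E '^(overlay|br_netfilter)' || echo "WARN: overlay/br_netfilter not loaded"
sysctl -n net.ipv4.ip_forward | grep -qx 1 || echo "WARN: net.ipv4.ip_forward is not 1"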
# Install containerd
wget https://mirrors.martin98.com/repository/proxy/github.com/containerd/containerd/releases/download/v$CONTAINERD_VERSION/containerd-$CONTAINERD_VERSION-linux-amd64.tar.gz
tar Cxzvf /usr/local containerd-$CONTAINERD_VERSION-linux-amd64.tar.gz
rm containerd-$CONTAINERD_VERSION-linux-amd64.tar.gz
wget https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/containerd/containerd/main/containerd.service
sudo mv containerd.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable --now containerd
# Generate and adjust the default containerd configuration
mkdir -p "/etc/containerd"
containerd config default > /etc/containerd/config.toml
# Point the sandbox (pause) image and per-registry config at the mirror, and use the
# systemd cgroup driver so containerd matches the kubelet.
sed -i "s|sandbox = 'registry.k8s.io|sandbox = '$MIRRORS|g" /etc/containerd/config.toml
sed -ri '0,/(config_path).*/s@(config_path).*@\1 = "/etc/containerd/certs.d"@' /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
cat <<EOF > /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: true
EOF
cat <<EOF >> /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF
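# With config_path set, containerd resolves each registry through
# /etc/containerd/certs.d/<registry>/hosts.toml; the [host."..."] entries below redirect
# pulls to the mirror (override_path keeps the mirror's /v2/<project> path as-is).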
# Per-registry mirror configuration directory
CONFIG_DIR="/etc/containerd/certs.d"
mkdir -pv "$CONFIG_DIR/docker.io"
cat <<EOF > "$CONFIG_DIR/docker.io/hosts.toml"
server = "https://docker.io"
[host."https://docker.martin98.com/v2/docker"]
capabilities = ["pull", "resolve"]
override_path = true
EOF
mkdir -pv "$CONFIG_DIR/k8s.gcr.io"
cat <<EOF > "$CONFIG_DIR/k8s.gcr.io/hosts.toml"
server = "https://k8s.gcr.io"
[host."https://docker.martin98.com/v2/k8s"]
capabilities = ["pull", "resolve"]
override_path = true
EOF
mkdir -pv "$CONFIG_DIR/gcr.io"
cat <<EOF > "$CONFIG_DIR/gcr.io/hosts.toml"
server = "https://gcr.io"
[host."https://docker.martin98.com/v2/gcr"]
capabilities = ["pull", "resolve"]
override_path = true
EOF
mkdir -pv "$CONFIG_DIR/ghcr.io"
cat <<EOF > "$CONFIG_DIR/ghcr.io/hosts.toml"
server = "https://ghcr.io"
[host."https://docker.martin98.com/v2/ghcr"]
capabilities = ["pull", "resolve"]
override_path = true
EOF
mkdir -pv "$CONFIG_DIR/quay.io"
cat <<EOF > "$CONFIG_DIR/quay.io/hosts.toml"
server = "https://quay.io"
[host."https://docker.martin98.com/v2/quay"]
capabilities = ["pull", "resolve"]
override_path = true
EOF
mkdir -pv "$CONFIG_DIR/registry.k8s.io"
cat <<EOF > "$CONFIG_DIR/registry.k8s.io/hosts.toml"
server = "https://registry.k8s.io"
[host."https://docker.martin98.com/v2/k8s"]
capabilities = ["pull", "resolve"]
override_path = true
EOF
# crictl --debug pull quay.io/k8scsi/csi-resizer:v0.5.0
sudo systemctl restart containerd && sudo systemctl enable --now containerd
# Install kubeadm, kubelet and kubectl, and hold their versions
apt install -y kubeadm kubelet kubectl && apt-mark hold kubeadm kubelet kubectl
# kubelet extra args are read from /etc/default/kubelet on Debian/Ubuntu
# (kubeadm-config.yaml is generated later for kubeadm init)
echo 'KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"' > /etc/default/kubelet
echo "Kubernetes runtime environment installed successfully"
# If this node is not listed in $masters, it is a worker: nothing more to do here
current_ip=$(hostname -I | awk '{print $1}')
if ! echo "$masters" | grep -qw "$current_ip"; then
    echo "Worker $current_ip initialized successfully"
    exit 0
fi
# Install keepalived for the control-plane VIP
apt install -y keepalived
# The first entry in $masters becomes the keepalived MASTER; the rest are BACKUPs
first_master=$(echo $masters | cut -d',' -f1)
if [ "$current_ip" == "$first_master" ]; then
    state=MASTER
    priority=200
else
    state=BACKUP
    priority=100
fi
# Configure the VIP
cat <<EOF | sudo tee /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
    state $state
    interface eth0
    virtual_router_id 51
    priority $priority
    advert_int 1
    virtual_ipaddress {
        $vip
    }
}
EOF
sudo systemctl restart keepalived
systemctl --no-pager status keepalived
echo "Master VIP on $current_ip initialized successfully"
if [ "$current_ip" != "$first_master" ]; then
echo "初始化 master $current_ip 成功"
exit 0
fi
# Generate the kubeadm configuration
cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: $(openssl rand -hex 3).$(openssl rand -hex 8)
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: $(hostname -I | awk '{print $1}')
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  name: $(hostname)
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "$vip_ip:6443"
controllerManager: {}
dns:
  imageRepository: $MIRRORS/coredns
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: $MIRRORS
kind: ClusterConfiguration
kubernetesVersion: "$K8S_RELEASE"
networking:
  dnsDomain: cluster.local
  podSubnet: $pod_subnet
  serviceSubnet: $service_subnet
proxy: {}
scheduler: {}
EOF
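# Optional (not part of the original flow): pre-pull the control-plane images from the
# mirror so that registry problems surface before kubeadm init runs.
kubeadm config images pull --config kubeadm-config.yaml || echo "WARN: image pre-pull failed"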
# Run kubeadm init
kubeadm init --config=kubeadm-config.yaml --upload-certs --v=9
kubectl get nodes
echo "初始化 master $current_ip 成功,开始配置网络"
# 配置
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
# Install the Tigera operator
kubectl create -f https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/tigera-operator.yaml
sleep 5
kubectl wait --for=condition=Ready pods --all -n tigera-operator --timeout=300s
echo "初始化 master $current_ip operator 成功"
# Install Calico
curl https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/custom-resources.yaml -O
sed -i "s|\(cidr: \).*|\1$pod_subnet|" custom-resources.yaml
kubectl create -f custom-resources.yaml
sleep 5
kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
kubectl wait --for=condition=Ready pods --all -n calico-apiserver --timeout=300s
echo "初始化 master $current_ip calico 成功"
kubectl get nodes
OUTPUT=$(kubeadm token create --print-join-command)
# Extract the token and discovery-token-ca-cert-hash from the join command
TOKEN=$(echo "$OUTPUT" | grep -oP 'token \K[\w.]+')
TOKEN_HASH=$(echo "$OUTPUT" | grep -oP 'discovery-token-ca-cert-hash \K.*')
# The certificate key is printed on the last line of the upload-certs output
CERTS=$(kubeadm init phase upload-certs --upload-certs | sed -n '$p')
cat <<EOF
------------------------------------------------------------------------------------
Master $current_ip initialized successfully
$OUTPUT
# Join as an additional control-plane (master) node:
kubeadm join $vip_ip:6443 \\
    --token $TOKEN \\
    --discovery-token-ca-cert-hash $TOKEN_HASH \\
    --control-plane --certificate-key $CERTS
# Join as a worker node:
kubeadm join $vip_ip:6443 \\
    --token $TOKEN \\
    --discovery-token-ca-cert-hash $TOKEN_HASH
------------------------------------------------------------------------------------
EOF
kubectl get nodes