[*] Prevent blocking
All checks were successful
Build Docker Image / Explore-Gitea-Actions (push) Successful in 10s
This commit is contained in:
parent e7ce179d93
commit 9a5dd11be4
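Context for the change below: when stdout is a terminal, `systemctl status` pipes its output through a pager and waits for user input, which stalls a non-interactive install script; that is most likely the blocking the commit title refers to, and this commit works around it by backgrounding the calls. A minimal sketch of a non-blocking alternative (not what this commit does, just an assumption about intent):

```bash
# Print the unit status once, without invoking a pager
systemctl status keepalived --no-pager --full

# Or only test whether the unit is running
systemctl is-active --quiet keepalived && echo "keepalived is active"
```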
@@ -123,7 +123,7 @@ echo "k8s runtime environment installed successfully"
 current_ip=$(hostname -I | awk '{print $1}')
 if ! echo "$masters" | grep -qw "$current_ip"; then
 echo "Worker $current_ip initialized successfully"
-return 0
+exit 0
 fi
 # Install keepalived and haproxy
 apt install -y keepalived haproxy
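One note on the `return 0` to `exit 0` change above: the script is executed (for example piped into bash), not sourced, and at the top level of an executed script `return` fails with a "can only be used in a function or sourced script" error, while `exit 0` cleanly ends the worker-node path. A minimal illustration (hypothetical snippet, not taken from the script):

```bash
#!/usr/bin/env bash
# At the top level of an executed script, `return` is an error:
# return 0    # bash rejects this outside a function or sourced file
exit 0        # correct way to stop the script early
```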
@@ -151,7 +151,7 @@ vrrp_instance VI_1 {
 }
 EOF
 sudo systemctl restart keepalived
-systemctl status keepalived
+systemctl status keepalived &
 echo "Master VIP $current_ip initialized successfully"

 # Initialize haproxy
@@ -198,7 +198,7 @@ EOF
 groupadd -r haproxy || echo "Group already exists"
 useradd -r -g haproxy -s /sbin/nologin haproxy || echo "User already exists"
 sudo systemctl restart keepalived && sudo systemctl restart haproxy
-systemctl status haproxy
+systemctl status haproxy &
 echo "Master haproxy $current_ip initialized successfully"

@@ -276,12 +276,14 @@ export KUBECONFIG=/etc/kubernetes/admin.conf

 # Install the operator
 kubectl create -f https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/tigera-operator.yaml
+sleep 5
 kubectl wait --for=condition=Ready pods --all -n tigera-operator --timeout=300s
 echo "Master $current_ip operator initialized successfully"
 # Install calico
 curl https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/custom-resources.yaml -O
 sed -i "s|\(cidr: \).*|\1$pod_subnet|" custom-resources.yaml
 kubectl create -f custom-resources.yaml
+sleep 5
 kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
 kubectl wait --for=condition=Ready pods --all -n calico-apiserver --timeout=300s
 echo "Master $current_ip calico initialized successfully"
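The two `sleep 5` additions above give the Tigera operator a moment to create its pods before `kubectl wait` runs; `kubectl wait` exits with an error immediately if no matching pods exist yet. A sketch of a sturdier variant (assuming the operator Deployment is named tigera-operator, as in the upstream manifest):

```bash
# Wait on the Deployment rollout instead of sleeping a fixed interval
kubectl rollout status deployment/tigera-operator -n tigera-operator --timeout=300s

# Or poll until at least one pod exists, then wait for readiness
until kubectl get pods -n calico-system --no-headers 2>/dev/null | grep -q .; do
  sleep 2
done
kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
```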
k8s/README.md (195 changed lines)
@@ -1,13 +1,4 @@
-## Initialize the environment on each node
+## Quick deployment
-```bash
-# Current defaults
-export K8S_VERSION=1.32
-export CONTAINERD_VERSION=2.0.2
-export CALICO_VERSION=3.29.1
-export MIRRORS=docker.martin98.com/k8s
-```
-
-### Configure k8s properties
 ```bash
 # Network segment configuration
 export pod_subnet=10.101.0.0/16
@@ -18,7 +9,21 @@ export vip=$vip_ip/16
 export masters=10.1.3.101,10.1.3.102,10.1.3.103
 curl -sSL https://git.martin98.com/MartinFarm/init/raw/branch/main/init-k8s.sh | bash
 ```
-### Initialize a single master node
+### Other notes
+```bash
+# A failed image pull can leave a node unhealthy
+systemctl restart kubelet
+systemctl restart containerd # or docker
+
+# Current defaults
+export K8S_VERSION=1.32
+export CONTAINERD_VERSION=2.0.2
+export CALICO_VERSION=3.29.1
+export MIRRORS=docker.martin98.com/k8s
+```
+
+### Initialize a single master node (do not set masters; only configure the network segments)
 ```bash
 cat <<EOF > kubeadm-config.yaml
 apiVersion: kubeadm.k8s.io/v1beta4
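The "Other notes" block added above restarts kubelet and containerd after a failed image pull. Before restarting, it can help to confirm that a pull actually failed; a small sketch, assuming containerd with crictl available on the node:

```bash
# Look for recent image pull errors in the kubelet log
journalctl -u kubelet --no-pager --since "10 min ago" | grep -i pull

# List the images already present on the node
crictl images
```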
@@ -91,171 +96,3 @@ kubectl create -f custom-resources.yaml
 kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
 kubectl wait --for=condition=Ready pods --all -n calico-apiserver --timeout=300s
 ```
-### Initialize a high-availability cluster
-#### All masters
-```bash
-current_ip=$(hostname -I | awk '{print $1}')
-first_master=$(echo $masters | cut -d',' -f1)
-if [ "$current_ip" == "$first_master" ]; then
-  state=MASTER
-  priority=200
-else
-  state=BACKUP
-  priority=100
-fi
-cat <<EOF | sudo tee /etc/keepalived/keepalived.conf
-vrrp_instance VI_1 {
-    state $state
-    interface eth0
-    virtual_router_id 51
-    priority $priority
-    advert_int 1
-    virtual_ipaddress {
-        $vip
-    }
-}
-EOF
-IFS=',' read -r -a master_ips <<< "$masters"
-backend_config=""
-for ((i=0; i<${#master_ips[@]}; i++)); do
-  backend_config+=" server master-$((i+1)) ${master_ips[$i]}:6444 check"$'\n'
-done
-mkdir -p /usr/local/haproxy
-cat <<EOF | sudo tee /etc/haproxy/haproxy.cfg
-global
-    log 127.0.0.1 local0 notice
-    maxconn 10000
-    chroot /usr/local/haproxy
-    user haproxy
-    group haproxy
-    daemon
-    # SSL tuning
-    tune.ssl.default-dh-param 2048
-    tune.bufsize 32768
-
-# Default configuration
-defaults
-    log global
-    option httplog
-    option dontlognull
-    timeout connect 5000
-    timeout client 50000
-    timeout server 50000
-
-frontend k8s-api
-    bind *:6443
-    mode tcp
-    option tcplog
-    default_backend k8s-api-backend
-
-backend k8s-api-backend
-    mode tcp
-    option tcp-check
-    balance roundrobin
-    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
-$backend_config
-EOF
-groupadd -r haproxy || echo "Group already exists"
-useradd -r -g haproxy -s /sbin/nologin haproxy || echo "User already exists"
-sudo systemctl restart keepalived && sudo systemctl restart haproxy
-systemctl status keepalived
-systemctl status haproxy
-```
-### Master-01
-```bash
-# Configuration
-cat <<EOF > kubeadm-config.yaml
-apiVersion: kubeadm.k8s.io/v1beta4
-bootstrapTokens:
-- groups:
-  - system:bootstrappers:kubeadm:default-node-token
-  token: $(openssl rand -hex 3).$(openssl rand -hex 8)
-  ttl: 24h0m0s
-  usages:
-  - signing
-  - authentication
-kind: InitConfiguration
-localAPIEndpoint:
-  advertiseAddress: $(hostname -I | awk '{print $1}')
-  bindPort: 6444
-nodeRegistration:
-  criSocket: unix:///var/run/containerd/containerd.sock
-  imagePullPolicy: IfNotPresent
-  imagePullSerial: true
-  name: $(hostname)
-  taints: null
-timeouts:
-  controlPlaneComponentHealthCheck: 4m0s
-  discovery: 5m0s
-  etcdAPICall: 2m0s
-  kubeletHealthCheck: 4m0s
-  kubernetesAPICall: 1m0s
-  tlsBootstrap: 5m0s
-  upgradeManifests: 5m0s
----
-apiServer: {}
-apiVersion: kubeadm.k8s.io/v1beta4
-caCertificateValidityPeriod: 87600h0m0s
-certificateValidityPeriod: 8760h0m0s
-certificatesDir: /etc/kubernetes/pki
-clusterName: kubernetes
-controlPlaneEndpoint: "$vip_ip:6443"
-controllerManager: {}
-dns:
-  imageRepository: $mirrors/coredns
-encryptionAlgorithm: RSA-2048
-etcd:
-  local:
-    dataDir: /var/lib/etcd
-imageRepository: $mirrors
-kind: ClusterConfiguration
-kubernetesVersion: $k8s_version
-networking:
-  dnsDomain: cluster.local
-  podSubnet: $pod_subnet
-  serviceSubnet: $service_subnet
-proxy: {}
-scheduler: {}
-EOF
-
-# Start the installation
-kubeadm init --config=kubeadm-config.yaml --upload-certs --v=9
-# Configuration
-mkdir -p $HOME/.kube
-sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
-sudo chown $(id -u):$(id -g) $HOME/.kube/config
-export KUBECONFIG=/etc/kubernetes/admin.conf
-
-# Install the operator
-kubectl create -f https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/tigera-operator.yaml
-kubectl wait --for=condition=Ready pods --all -n tigera-operator --timeout=300s
-# Install calico
-curl https://mirrors.martin98.com/repository/proxy/raw.githubusercontent.com/projectcalico/calico/v$CALICO_VERSION/manifests/custom-resources.yaml -O
-sed -i "s|\(cidr: \).*|\1$pod_subnet|" custom-resources.yaml
-kubectl create -f custom-resources.yaml
-kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
-kubectl wait --for=condition=Ready pods --all -n calico-apiserver --timeout=300s
-```
-
-### Join the cluster
-```bash
-kubeadm token create --print-join-command
-# Worker join
-kubeadm join 10.1.2.200:6443 \
-  --token ??? \
-  --discovery-token-ca-cert-hash ???
-# Admin join
-kubeadm join 10.1.2.200:6443 \
-  --token ??? \
-  --discovery-token-ca-cert-hash ??? \
-  --control-plane
-# Verify the cluster
-kubectl get nodes
-```
-
-### Other notes
-```bash
-# A failed image pull can leave a node unhealthy
-systemctl restart kubelet
-systemctl restart containerd # or docker
-```