One-Click Deployment Guide: A Dual-Master Highly Available Kubernetes Cluster on openEuler

2025-04-17

1. System Initialization (run on all nodes)

# ===================== Basic configuration =====================
# Disable the firewall and SELinux
sudo systemctl stop firewalld && sudo systemctl disable firewalld
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config

# Disable swap
sudo swapoff -a
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

# Configure kernel parameters
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
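Note that the two net.bridge.* keys only exist once the br_netfilter module is loaded (the module load itself appears in section 3; loading it here first avoids sysctl errors):

sudo modprobe br_netfilter   # the bridge sysctls only exist once this module is loaded
sudo sysctl --system         # re-apply; all three keys should now report 1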

# Add hostname resolution (adjust the IPs to match your environment)
cat <<EOF | sudo tee -a /etc/hosts
172.20.1.11 master01
172.20.1.12 master02
172.20.1.21 node01
172.20.1.10 lb-vip
EOF

# ===================== Time synchronization =====================
sudo yum install -y chrony
sudo systemctl enable chronyd && sudo systemctl start chronyd
chronyc sources -v  # a line starting with ^* indicates successful sync

# ===================== Audit logging =====================
sudo mkdir -p /var/log/kubernetes/audit
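The directory above is only a placeholder; kube-apiserver writes nothing there until an audit policy is wired in via kubeadm extraArgs (not covered in this guide). A minimal policy sketch, assuming /etc/kubernetes/audit-policy.yaml as the path:

cat <<EOF | sudo tee /etc/kubernetes/audit-policy.yaml
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  - level: Metadata   # log request metadata only, keeping volume manageable
EOF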

2. Load Balancer Layer Deployment (run on lb01/lb02)

# ===================== Install HAProxy =====================
sudo yum install -y haproxy

# Generate the config file (replace with your actual master IPs)
cat <<EOF | sudo tee /etc/haproxy/haproxy.cfg
global
    log /dev/log local0
    maxconn 20000
    user haproxy
    group haproxy

defaults
    log global
    mode tcp
    timeout connect 5s
    timeout client 50s
    timeout server 50s

frontend k8s-api
    bind *:6443
    default_backend k8s-api

backend k8s-api
    balance roundrobin
    option tcp-check
    server master01 172.20.1.11:6443 check port 6443 inter 5s fall 3 rise 2
    server master02 172.20.1.12:6443 check port 6443 inter 5s fall 3 rise 2

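# HTTP/HTTPS passthrough; assumes an ingress controller exposes 80/443 on the masters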
frontend k8s-http
    bind *:80
    bind *:443
    default_backend k8s-http

backend k8s-http
    balance roundrobin
    server master01 172.20.1.11:80 check
    server master02 172.20.1.12:80 check
EOF

# Start the service
sudo systemctl enable haproxy && sudo systemctl restart haproxy
ss -ltnp | grep 6443  # HAProxy should be listening on this port

# ===================== Keepalived configuration =====================
sudo yum install -y keepalived

# Master node config (lb01)
cat <<EOF | sudo tee /etc/keepalived/keepalived.conf
vrrp_script chk_haproxy {
    script "pidof haproxy"
    interval 2
    weight 2
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.20.1.10/24
    }
    track_script {
        chk_haproxy
    }
}
EOF

# Backup node config (lb02): identical to lb01 except state is BACKUP
# and priority is 90, as sketched below.
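A full lb02 configuration for reference (the same interface name, VIP, and auth settings as lb01 are assumed):

cat <<EOF | sudo tee /etc/keepalived/keepalived.conf
vrrp_script chk_haproxy {
    script "pidof haproxy"
    interval 2
    weight 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.20.1.10/24
    }
    track_script {
        chk_haproxy
    }
}
EOF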

sudo systemctl enable keepalived && sudo systemctl restart keepalived
ip addr show eth0 | grep '172.20.1.10'  # the VIP should be bound here

3. Container Runtime Installation (run on all nodes)

# ===================== Install containerd =====================
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter

sudo yum install -y containerd
sudo containerd config default | sudo tee /etc/containerd/config.toml
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sudo systemctl enable containerd && sudo systemctl restart containerd

# Verify the runtime
sudo ctr version  # should print both client and server versions
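On hosts without access to registry.k8s.io, pulling the default pause (sandbox) image will fail. An optional tweak, assuming the Aliyun mirror is reachable and containerd's default config layout:

sudo sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml
sudo systemctl restart containerd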

4. Kubernetes Component Installation (run on all nodes)

# ===================== Configure the package repository =====================
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

# ===================== Install pinned versions =====================
sudo yum install -y kubeadm-1.28.2 kubelet-1.28.2 kubectl-1.28.2
sudo systemctl enable kubelet

# Check the version
kubeadm version -o short  # should print v1.28.2
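Optionally pre-pull the control-plane images so the init step in the next section does not stall on downloads; this uses the same mirror as the init configuration below:

sudo kubeadm config images pull \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.28.2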

5. Initialize the First Master Node (run on master01)

# ===================== Generate the init configuration =====================
cat <<EOF | tee kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
  criSocket: "unix:///var/run/containerd/containerd.sock"
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.28.2
controlPlaneEndpoint: "lb-vip:6443"
networking:
  podSubnet: 10.244.0.0/16
apiServer:
  certSANs:
  - "lb-vip"
  - "172.20.1.10"
imageRepository: registry.aliyuncs.com/google_containers
EOF

# ===================== Run the initialization =====================
sudo kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log

# Configure kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Retrieve the join commands
echo "Control-plane join command:"
grep 'kubeadm join' kubeadm-init.log -A2
echo "Worker join command:"
kubeadm token create --print-join-command
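The certificate key uploaded by --upload-certs expires after two hours. If master02 joins later than that, regenerate fresh credentials on master01:

sudo kubeadm init phase upload-certs --upload-certs  # prints a new certificate key
kubeadm token create --print-join-command            # prints a new worker join command
# For a control-plane join, append: --control-plane --certificate-key <key printed above>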

6. Join the Second Master Node (run on master02)

# Use the control-plane join command generated on master01
sudo kubeadm join lb-vip:6443 --token <your-token> \
  --discovery-token-ca-cert-hash sha256:<your-hash> \
  --control-plane --certificate-key <your-cert-key> \
  --apiserver-advertise-address=172.20.1.12

# Verify the etcd cluster (run from master01, where kubectl is configured; this
# cluster uses containerd, so exec into the etcd static pod rather than docker)
kubectl -n kube-system exec etcd-master01 -- etcdctl \
  --endpoints=https://172.20.1.11:2379,https://172.20.1.12:2379 \
  --cert=/etc/kubernetes/pki/etcd/peer.crt \
  --key=/etc/kubernetes/pki/etcd/peer.key \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  endpoint health

7. Join the Worker Node (run on node01)

# Use the worker join command
sudo kubeadm join lb-vip:6443 --token <your-token> \
  --discovery-token-ca-cert-hash sha256:<your-hash>

# Verify from a master node
kubectl get nodes -o wide -w

8. Deploy the Network Plugin (run on any master)

# ===================== Install Calico =====================
kubectl apply -f https://docs.projectcalico.org/v3.26/manifests/calico.yaml

# Watch the installation progress
watch kubectl get pods -n kube-system -l k8s-app=calico-node

# Verify network connectivity
kubectl create deployment nginx --image=nginx:alpine
kubectl expose deployment nginx --port=80
kubectl run test --image=busybox --rm -it --restart=Never -- wget -qO- http://nginx.default.svc.cluster.local  # ClusterIPs do not answer ICMP ping, so test over HTTP
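Clean up the test resources afterwards:

kubectl delete deployment nginx
kubectl delete service nginx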

9. Verify High Availability

# Simulate an apiserver failure (under kubeadm, kube-apiserver runs as a
# static pod, not a systemd service, so move its manifest aside)
ssh master01 "sudo mv /etc/kubernetes/manifests/kube-apiserver.yaml /tmp/"
kubectl get nodes  # requests should keep succeeding via master02 behind the VIP
ssh master01 "sudo mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/"  # restore

# Verify VIP failover
ssh lb01 "sudo systemctl stop keepalived"
ping -c 5 lb-vip  # should remain reachable

10. Post-Deployment Checks

# Cluster status checks
kubectl get componentstatus  # deprecated since v1.19 but still informative
kubectl get nodes -o wide
kubectl get pods -A -o wide

# Network verification
kubectl run test-pod --image=nginx:alpine --restart=Never --rm -it -- sh
# Inside the container, run (the API server only serves HTTPS on 443, and
# busybox wget is always present in alpine, unlike curl):
wget --no-check-certificate -qO- https://kubernetes.default  # an HTTP 403 error still proves DNS and connectivity work
ping <pod IP on another node>  # cross-node pod connectivity

# Storage verification
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: test-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle  # note: Recycle is deprecated; prefer Retain on current clusters
  hostPath:
    path: /data/test-pv
EOF
kubectl get pv
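A PV with no claim just sits in Available. To confirm it is actually claimable, bind it with a minimal PVC (test-pvc is a hypothetical name; the empty storageClassName keeps the claim from falling through to a default StorageClass):

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ""
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc test-pvc  # STATUS should become Bound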

Troubleshooting Quick Reference

# Tail the kubelet logs
journalctl -u kubelet -f

# Check certificate expiry
kubeadm certs check-expiration
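If anything is close to expiry, kubeadm can renew all certificates in place; the control-plane static pods must then be restarted to pick up the new certs:

sudo kubeadm certs renew all
# Then restart the kube-apiserver, controller-manager, scheduler, and etcd
# static pods (e.g. briefly move their manifests out of
# /etc/kubernetes/manifests and back) so the new certificates are loaded.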

# Reset a node (destructive!)
sudo kubeadm reset -f
sudo rm -rf /etc/cni/net.d /etc/kubernetes/ $HOME/.kube

# Force-delete a stuck pod
kubectl delete pod <pod-name> --grace-period=0 --force

Final Deployment Checklist

  1. All nodes report Ready
  2. All calico-node pods are Running
  3. coredns pods are running normally
  4. Services are reachable across nodes
  5. The VIP failover test passes
  6. Control-plane components show no warnings