This article uses the stacked etcd topology. An external etcd cluster setup differs in several ways and may be covered in a follow-up.
Machines and environment
| Host | IP | Role | Installed software |
| --- | --- | --- | --- |
| master01 | 172.31.31.163 (VIP: 172.31.31.200) | control-plane node 1 | keepalived, haproxy, containerd, kubelet, etcd, kube-scheduler, kube-apiserver, kube-controller-manager, kube-proxy |
| master02 | 172.31.31.164 (VIP: 172.31.31.200) | control-plane node 2 | keepalived, haproxy, containerd, kubelet, etcd, kube-scheduler, kube-apiserver, kube-controller-manager, kube-proxy |
| master03 | 172.31.31.165 (VIP: 172.31.31.200) | control-plane node 3 | keepalived, haproxy, containerd, kubelet, etcd, kube-scheduler, kube-apiserver, kube-controller-manager, kube-proxy |
| node01 | 172.31.31.166 | worker node 1 | containerd, kubelet, kube-proxy, tigera-operator (calico) |
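Everything below is driven from master01 over SSH (ssh/scp to the other machines), so passwordless root SSH is assumed. A minimal sketch of that setup, using the IPs from the table above:

```bash
# Run on master01: create a key (no passphrase) and push it to each other machine.
ssh-keygen -t ed25519 -N "" -f ~/.ssh/id_ed25519
for ip in 172.31.31.164 172.31.31.165 172.31.31.166; do
  ssh-copy-id root@${ip}
done
```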
Environment preparation (all machines)
```bash
# Disable swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux (anchor the pattern so comment lines in the file are untouched)
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
# Install required packages
dnf install -y wget tree curl bash-completion jq vim net-tools telnet git lrzsz epel-release tar
# Raise file-descriptor limits (the hard limit must be >= the soft limit,
# so both nofile values are set to 655360)
ulimit -SHn 65535 && \
cat > /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
# Install IPVS userspace tools and dependencies
dnf install -y ipvsadm ipset sysstat conntrack libseccomp
# Kernel modules needed for IPVS, conntrack and bridged traffic
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
overlay
br_netfilter
EOF
# -a is required to load several modules at once; without it modprobe would
# treat the extra names as parameters of the first module
modprobe -a br_netfilter overlay ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack ip_tables ip_set xt_set ipt_set ipt_rpfilter ipt_REJECT ipip
systemctl restart systemd-modules-load.service
# Kernel parameters
cat > /etc/sysctl.d/95-k8s-sysctl.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-arptables = 1
fs.may_detach_mounts = 1
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
vm.max_map_count=655360
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
# Host name resolution
cat >> /etc/hosts <<-EOF
172.31.31.200 master-vip
172.31.31.163 master01
172.31.31.164 master02
172.31.31.165 master03
172.31.31.166 node01
EOF
```
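Optionally, verify on each machine that the modules and kernel parameters actually took effect:

```bash
# The bridge and IPVS modules should all show up.
lsmod | grep -E 'br_netfilter|ip_vs|nf_conntrack'

# These must all print 1.
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables
```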
Install keepalived
```bash
# Install keepalived on master01
dnf install -y keepalived
cat > /etc/keepalived/keepalived.conf <<-EOF
# Master
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    vrrp_skip_check_adv_addr
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}
vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens160
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1314
    }
    virtual_ipaddress {
        172.31.31.200
    }
    track_script {
        check_apiserver
    }
}
EOF
# Quote the heredoc delimiter so \$* and \${...} end up in the script verbatim
# instead of being expanded (to empty) by the outer shell at write time
cat > /etc/keepalived/check_apiserver.sh <<-'EOF'
#!/bin/sh
APISERVER_VIP=172.31.31.200
APISERVER_DEST_PORT=6443
errorExit() {
    echo "*** $*" 1>&2
    exit 1
}
curl --silent --max-time 2 --insecure https://localhost:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://localhost:${APISERVER_DEST_PORT}/"
if ip addr | grep -q ${APISERVER_VIP}; then
    curl --silent --max-time 2 --insecure https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/"
fi
EOF
chmod +x /etc/keepalived/check_apiserver.sh
systemctl enable keepalived --now
# Install keepalived on master02
ssh root@master02 "dnf install -y keepalived"
scp /etc/keepalived/keepalived.conf root@master02:/etc/keepalived/
scp /etc/keepalived/check_apiserver.sh root@master02:/etc/keepalived/
ssh root@master02 "sed -i 's|state MASTER|state BACKUP|' /etc/keepalived/keepalived.conf && sed -i 's|priority 100|priority 90|' /etc/keepalived/keepalived.conf && systemctl enable keepalived --now"
# Install keepalived on master03
ssh root@master03 "dnf install -y keepalived"
scp /etc/keepalived/keepalived.conf root@master03:/etc/keepalived/
scp /etc/keepalived/check_apiserver.sh root@master03:/etc/keepalived/
# keepalived only accepts MASTER/BACKUP as states, so the backups use BACKUP
ssh root@master03 "sed -i 's|state MASTER|state BACKUP|' /etc/keepalived/keepalived.conf && sed -i 's|priority 100|priority 89|' /etc/keepalived/keepalived.conf && systemctl enable keepalived --now"
```
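An optional failover check: the VIP should sit on master01 and move to master02 (the next highest priority) when keepalived on master01 stops. Interface and VIP as configured above:

```bash
# On master01: the VIP should be bound to ens160.
ip addr show ens160 | grep 172.31.31.200

# Simulate a failure and watch the VIP move to master02 (priority 90).
systemctl stop keepalived
ssh root@master02 "ip addr show ens160 | grep 172.31.31.200"

# Restore; master01 (priority 100) preempts and takes the VIP back.
systemctl start keepalived
```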
Install haproxy
```bash
# Install haproxy on master01
dnf install haproxy -y
cat > /etc/haproxy/haproxy.cfg <<-EOF
#---------------------------------------------------------------------
# apiserver frontend which proxies to the masters
#---------------------------------------------------------------------
frontend apiserver
    bind *:16443
    mode tcp
    option tcplog
    default_backend apiserver
#---------------------------------------------------------------------
# round robin balancing for apiserver
#---------------------------------------------------------------------
backend apiserver
    option httpchk GET /healthz
    http-check expect status 200
    mode tcp
    option ssl-hello-chk
    balance roundrobin
    server master01 172.31.31.163:6443 check
    server master02 172.31.31.164:6443 check
    server master03 172.31.31.165:6443 check
EOF
systemctl enable haproxy --now
# Install haproxy on master02
ssh root@master02 "dnf install -y haproxy"
scp /etc/haproxy/haproxy.cfg root@master02:/etc/haproxy/
ssh root@master02 "systemctl enable haproxy --now"
# Install haproxy on master03
ssh root@master03 "dnf install -y haproxy"
scp /etc/haproxy/haproxy.cfg root@master03:/etc/haproxy/
ssh root@master03 "systemctl enable haproxy --now"
```
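Optionally confirm the proxy layer before touching kubeadm. The backends stay down until the apiservers exist, so only the listener and config syntax can be checked at this point:

```bash
# The frontend should be listening on 16443 on each master.
ss -lntp | grep 16443

# Validate the configuration file syntax.
haproxy -c -f /etc/haproxy/haproxy.cfg
```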
Install containerd
Go to the containerd releases page and download cri-containerd-cni-<version>-linux-amd64.tar.gz:
Releases · containerd/containerd (github.com)
After unpacking it, adjust the config file and distribute everything to the other two master nodes:
```bash
# Unpack and start on master01
# Unpack into the current directory (creates usr/, etc/, opt/)
tar xvf cri-containerd-cni-<version>-linux-amd64.tar.gz
# Adjust the config: systemd cgroup driver and a reachable pause image
mkdir -p etc/containerd
containerd config default > etc/containerd/config.toml
sed -i 's|SystemdCgroup = false|SystemdCgroup = true|' etc/containerd/config.toml
sed -i 's|sandbox_image = "registry.k8s.io/pause:3.6"|sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"|' etc/containerd/config.toml
# Copy the trees into / locally; daemon-reload so systemd picks up the shipped unit
cp -r usr etc opt /
systemctl daemon-reload
systemctl enable containerd --now
# Distribute to master02
scp -r usr root@master02:/
scp -r etc root@master02:/
scp -r opt root@master02:/
ssh master02 "systemctl daemon-reload && systemctl enable containerd --now"
# Distribute to master03
scp -r usr root@master03:/
scp -r etc root@master03:/
scp -r opt root@master03:/
ssh master03 "systemctl daemon-reload && systemctl enable containerd --now"
```
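A quick check that the runtime is up (optional). The cri-containerd-cni bundle ships crictl; if /etc/crictl.yaml did not come along with the copied trees, point crictl at the socket first:

```bash
# Tell crictl where the CRI socket lives.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF

# Should print the runtime name/version and healthy status conditions.
crictl version
crictl info | jq '.status.conditions'
```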
Install kubeadm
```bash
# master01
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
dnf install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
systemctl enable --now kubelet
# master02
scp /etc/yum.repos.d/kubernetes.repo root@master02:/etc/yum.repos.d/kubernetes.repo
ssh root@master02 "dnf install -y kubelet kubeadm kubectl --disableexcludes=kubernetes && systemctl enable --now kubelet"
# master03
scp /etc/yum.repos.d/kubernetes.repo root@master03:/etc/yum.repos.d/kubernetes.repo
ssh root@master03 "dnf install -y kubelet kubeadm kubectl --disableexcludes=kubernetes && systemctl enable --now kubelet"
```
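Optionally confirm that all three masters ended up with the same versions before initializing:

```bash
# Local check on master01, then the other two masters over SSH.
kubeadm version -o short && kubelet --version
for host in master02 master03; do
  echo "== ${host} =="
  ssh root@${host} "kubeadm version -o short && kubelet --version"
done
```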
Initialize the Kubernetes control plane
```bash
# Run the initialization on master01
kubeadm init --control-plane-endpoint "master-vip:16443" --upload-certs --image-repository=registry.aliyuncs.com/google_containers
# Point kubectl at the new cluster (as printed by kubeadm init)
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# Run the control-plane join command printed by kubeadm init on master02 and master03
ssh root@master02 "kubeadm join master-vip:16443 --token exg9e4.95q0m3c0kf2n9guv --discovery-token-ca-cert-hash sha256:66bf43839cb013300179f397979b1d2c20b997df2e46f651f7d6c2f60c2aae84 --control-plane --certificate-key 33bad54b2097889692bc8293e15d71668ccccd6f94b8e9c077aaed7f5810e6c1"
ssh root@master03 "kubeadm join master-vip:16443 --token exg9e4.95q0m3c0kf2n9guv --discovery-token-ca-cert-hash sha256:66bf43839cb013300179f397979b1d2c20b997df2e46f651f7d6c2f60c2aae84 --control-plane --certificate-key 33bad54b2097889692bc8293e15d71668ccccd6f94b8e9c077aaed7f5810e6c1"
# Run the worker join command on node01 (node01 needs containerd, kubelet and
# kubeadm installed first, same as on the masters)
ssh root@node01 "kubeadm join master-vip:16443 --token exg9e4.95q0m3c0kf2n9guv --discovery-token-ca-cert-hash sha256:66bf43839cb013300179f397979b1d2c20b997df2e46f651f7d6c2f60c2aae84"
```
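The token and certificate key above are from the author's run and expire (tokens after 24 hours, the uploaded certificates after 2 hours). Fresh join commands can be regenerated on master01 at any time:

```bash
# Prints a new worker join command (new token included).
kubeadm token create --print-join-command

# Re-uploads the control-plane certificates and prints the new certificate key;
# append it to the join command as --control-plane --certificate-key <key>.
kubeadm init phase upload-certs --upload-certs
```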
```bash
# List the nodes
kubectl get nodes -o wide
```
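Since this is a stacked-etcd topology, it is worth confirming that etcd actually has three members. An optional check that runs etcdctl inside the etcd pod on master01 (kubeadm names these pods etcd-<node name>):

```bash
kubectl -n kube-system exec etcd-master01 -- etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  member list -w table
```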
Install calico
```bash
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/tigera-operator.yaml
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/custom-resources.yaml
# The operator runs in tigera-operator; the calico pods it creates land in calico-system
watch kubectl get pods -n calico-system
```
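Once the calico pods are Running, every node should report Ready. A minimal smoke test (the deployment name is hypothetical; any image reachable from the nodes will do):

```bash
kubectl get nodes

# Deploy something small and make sure its pods get IPs from calico.
kubectl create deployment hello --image=nginx --replicas=2
kubectl get pods -o wide -l app=hello

# Clean up.
kubectl delete deployment hello
```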