1. Environment

System OS        IP Address      Docker    Kernel                 Hostname   CPU  Memory  Role
CentOS 7.7.1908  192.168.1.171   19.03.7   3.10.0-1062.12.1.el7   k8s-m01    2C   2G      k8s-master
CentOS 7.7.1908  192.168.1.172   19.03.7   3.10.0-1062.12.1.el7   k8s-m02    2C   2G      k8s-master
CentOS 7.7.1908  192.168.1.173   19.03.7   3.10.0-1062.12.1.el7   k8s-m03    2C   2G      k8s-master
CentOS 7.7.1908  192.168.1.174   19.03.7   3.10.0-1062.12.1.el7   k8s-n01    2C   2G      k8s-node

2. Versions

kubeadm: v1.17.3

Kubernetes: v1.17.3

etcd: 3.4.3-0

Docker CE: 19.03.7

Calico: v3.13.0

3. Network

Pod CIDR (cluster CIDR): 10.244.0.0/16

Service CIDR: 10.96.0.0/12

Service DNS IP: 10.96.0.10

DNS domain: cluster.local

Kubernetes API: apiserver.k8s.local:6443    # apiserver.k8s.local must be resolvable via /etc/hosts, with the IP pointing at a k8s master's API server

4. YUM repository configuration

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet kubeadm kubectl

systemctl enable kubelet && systemctl start kubelet

yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum makecache fast

yum list docker-ce.x86_64 --showduplicates | sort -r

yum -y install docker-ce
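The unpinned installs above pull the latest packages from the repos. To reproduce the exact versions listed in section 2, the packages can be pinned instead; a sketch (confirm the exact release suffix against the `yum list --showduplicates` output above):

yum install -y kubelet-1.17.3 kubeadm-1.17.3 kubectl-1.17.3
yum install -y docker-ce-19.03.7 docker-ce-cli-19.03.7 containerd.io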

5. Disable the firewall

systemctl stop firewalld && systemctl disable firewalld

6. Disable NetworkManager

systemctl stop NetworkManager && systemctl disable NetworkManager

## Why disable NetworkManager: it conflicts with Calico and will try to take over the interfaces that Calico manages.

7. Disable SELinux

setenforce 0

sed -i "s#=enforcing#=disabled#g" /etc/selinux/config

8. Disable swap

swapoff -a && sysctl -w vm.swappiness=0

sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
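Optionally verify that swap is really off (a quick sanity check; `swapon -s` prints nothing when no swap device is active):

free -m | grep -i swap
swapon -s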

9. Time synchronization

yum install -y chrony ntpdate
ntpdate 0.cn.pool.ntp.org
hwclock --systohc
## chronyd reads /etc/chrony.conf, not ntpd's /etc/ntp.conf
cat << EOF >> /etc/chrony.conf
server 0.cn.pool.ntp.org
server 1.cn.pool.ntp.org
server 2.cn.pool.ntp.org
server 3.cn.pool.ntp.org
EOF

systemctl restart chronyd && systemctl enable chronyd
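To confirm chronyd is actually syncing against the pool servers, a quick check:

chronyc sources -v
chronyc tracking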

10. Kernel parameter tuning

cat <<EOF > /etc/sysctl.d/k8s.conf
# Work around long-connection timeouts in ipvs mode; any value below 900 works
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
# Make iptables process bridged traffic (required by kube-proxy and most CNIs)
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches = 89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
EOF

## br_netfilter must be loaded before the net.bridge.* keys can apply
modprobe br_netfilter

sysctl --system
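A spot-check that the forwarding and bridge keys took effect (the bridge keys only exist once br_netfilter is loaded):

sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables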

11. Node hostname resolution

cat << EOF >> /etc/hosts
192.168.1.171   k8s-m01
192.168.1.172   k8s-m02
192.168.1.173   k8s-m03
192.168.1.174   k8s-n01
EOF

12. Enable IPVS

yum install ipvsadm ipset sysstat conntrack libseccomp -y

## Load the IPVS kernel modules at boot; systemd-modules-load reads bare module names, one per line
cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
br_netfilter
EOF

## Load them immediately as well
for mod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4 br_netfilter; do modprobe $mod; done
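Confirm the modules are loaded:

lsmod | grep -e ip_vs -e nf_conntrack_ipv4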

13. Configure Docker

mkdir -p /etc/docker
## Overwrite (not append to) daemon.json so the file stays valid JSON
cat > /etc/docker/daemon.json <<EOF
{
    "log-driver": "json-file",
    "log-opts": {"max-size": "100m"},
    "storage-driver": "overlay2",
    "storage-opts": ["overlay2.override_kernel_check=true"],
    "max-concurrent-downloads": 10,
    "max-concurrent-uploads": 10,
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": [
        "https://xxxxx.mirror.aliyuncs.com"
    ]
}
EOF

systemctl start docker && systemctl enable docker
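kubelet expects the systemd cgroup driver configured above; once Docker is running it can be verified with:

docker info 2>/dev/null | grep -i "cgroup driver"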

14. Configure passwordless SSH

Run on k8s-m01:

yum install sshpass -y

ssh-keygen -t rsa -P '' -f /root/.ssh/id_rsa

for NODE in k8s-m01 k8s-m02 k8s-m03 k8s-n01; do
  echo "--- $NODE ---"
  sshpass -p abcd1234 ssh-copy-id -o "StrictHostKeyChecking no" -i /root/.ssh/id_rsa.pub ${NODE}
  ssh ${NODE} "hostnamectl set-hostname ${NODE}"
done
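With passwordless SSH in place, the per-node preparation files from the earlier sections can be pushed out from k8s-m01 in one loop instead of being repeated by hand on every node. A sketch, assuming the config files above were created on k8s-m01 first:

for NODE in k8s-m02 k8s-m03 k8s-n01; do
  scp /etc/yum.repos.d/kubernetes.repo ${NODE}:/etc/yum.repos.d/
  scp /etc/sysctl.d/k8s.conf ${NODE}:/etc/sysctl.d/
  scp /etc/modules-load.d/ipvs.conf ${NODE}:/etc/modules-load.d/
  ssh ${NODE} "for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4 br_netfilter; do modprobe \$m; done; sysctl --system"
done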

15. The following steps are performed on k8s-m01

Create kubeadm-config.yaml:

cat <<EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.17.3
controlPlaneEndpoint: "apiserver.k8s.local:6443"
networking:
  podSubnet: "10.244.0.0/16"
imageRepository: "gcr.azk8s.cn/google_containers"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF

## controlPlaneEndpoint is the address of the API server

## Add the hosts entry

echo '127.0.0.1 apiserver.k8s.local' >> /etc/hosts
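Before running kubeadm, confirm the name now resolves locally:

getent hosts apiserver.k8s.local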

16. Initialize the control plane with kubeadm

## If the network is slow, pull the images in advance

kubeadm config images pull --config=kubeadm-config.yaml

kubeadm init --config=kubeadm-config.yaml --upload-certs

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube

  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.

Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:

  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join apiserver.k8s.local:6443 --token 259pgq.fjrb8zpx3uzbsx8w \
    --discovery-token-ca-cert-hash sha256:1f52834e5713cc55a2bf9516aa32246ac167d042270be2041aba491cd665b908 \
    --control-plane --certificate-key 39ed22131651a43bc0473e4fbc19fc7eb2540f8a3e7abb155ddad15a6394c4fb

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!

As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use

"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join apiserver.k8s.local:6443 --token 259pgq.fjrb8zpx3uzbsx8w \
    --discovery-token-ca-cert-hash sha256:1f52834e5713cc55a2bf9516aa32246ac167d042270be2041aba491cd665b908

17. Set up kubeconfig on k8s-m01

mkdir -p $HOME/.kube

cp -rp /etc/kubernetes/admin.conf $HOME/.kube/config

chown $(id -u):$(id -g) $HOME/.kube/config

[root@k8s-m01 ~]# kubectl get no
NAME      STATUS     ROLES    AGE     VERSION
k8s-m01   NotReady   master   3m32s   v1.17.3

[root@k8s-m01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}

18. Deploy the Calico CNI plugin on k8s-m01

wget https://docs.projectcalico.org/manifests/calico.yaml

kubectl apply -f calico.yaml

## Recent Calico releases no longer require editing the cluster CIDR in the YAML; if the cluster already defines a pod CIDR, Calico picks it up automatically.

## Download the calicoctl management tool

wget -O /usr/local/bin/calicoctl https://github.com/projectcalico/calicoctl/releases/download/v3.13.0/calicoctl

chmod +x /usr/local/bin/calicoctl

export DATASTORE_TYPE=kubernetes

export KUBECONFIG=~/.kube/config

[root@k8s-m01 ~]# calicoctl get ippool -o wide
NAME                  CIDR            NAT    IPIPMODE   VXLANMODE   DISABLED   SELECTOR
default-ipv4-ippool   10.244.0.0/16   true   Always     Never       false      all()
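The node stays NotReady until calico-node is up on it; the rollout can be watched like this (the k8s-app=calico-node label comes from the stock calico.yaml manifest):

kubectl -n kube-system get pods -l k8s-app=calico-node -w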

19. Run on the other k8s-master nodes

Join the other two master nodes to the cluster:

## Point the API domain at k8s-m01 first

echo '192.168.1.171 apiserver.k8s.local' >> /etc/hosts

kubeadm join apiserver.k8s.local:6443 --token 259pgq.fjrb8zpx3uzbsx8w \
    --discovery-token-ca-cert-hash sha256:1f52834e5713cc55a2bf9516aa32246ac167d042270be2041aba491cd665b908 \
    --control-plane --certificate-key 39ed22131651a43bc0473e4fbc19fc7eb2540f8a3e7abb155ddad15a6394c4fb

mkdir -p $HOME/.kube

cp -rp /etc/kubernetes/admin.conf $HOME/.kube/config

chown $(id -u):$(id -g) $HOME/.kube/config

## Then point the API domain back at the local host (each master runs its own kube-apiserver on 6443)

sed -i 's#192.168.1.171 apiserver.k8s.local#127.0.0.1 apiserver.k8s.local#g' /etc/hosts

20. Configure haproxy

## haproxy provides load balancing for the Kubernetes API server; since resources are tight, it is installed on the k8s-n01 node.

mkdir /usr/local/haproxy

tar -zxvf haproxy-1.9.10.tar.gz

cd haproxy-1.9.10

make TARGET=linux31    ## linux31 for RHEL 7, linux26 for RHEL 6; the target is chosen by kernel version

make install PREFIX=/usr/local/haproxy

mkdir -p /usr/local/haproxy/conf
cat <<EOF > /usr/local/haproxy/conf/haproxy.cfg
global
  log 127.0.0.1 local0 info
defaults
  log global
  mode http
  option dontlognull
  timeout connect 5000ms
  timeout client 600000ms
  timeout server 600000ms
listen stats
  bind :9090
  mode http
  balance
  stats refresh 10s
  stats hide-version
  stats uri /haproxy_stats
  stats auth admin:admin123
  stats admin if TRUE
frontend kube-apiserver-https
  mode tcp
  bind :6443
  default_backend kube-apiserver-backend
backend kube-apiserver-backend
  mode tcp
  balance roundrobin
  stick-table type ip size 200k expire 30m
  stick on src
  server apiserver1 192.168.1.171:6443 check
  server apiserver2 192.168.1.172:6443 check
  server apiserver3 192.168.1.173:6443 check
EOF
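Validate the configuration before starting haproxy (haproxy's built-in check mode):

/usr/local/haproxy/sbin/haproxy -c -f /usr/local/haproxy/conf/haproxy.cfg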

## Configure rsyslog

vim /etc/rsyslog.conf

# Provides UDP syslog reception (uncomment these two lines)
$ModLoad imudp
$UDPServerRun 514

# Add the following line
local0.*          /var/log/haproxy.log

vim /etc/sysconfig/rsyslog

SYSLOGD_OPTIONS="-r -m 0 -c 2"

systemctl restart rsyslog

## Start haproxy

/usr/local/haproxy/sbin/haproxy -f /usr/local/haproxy/conf/haproxy.cfg &
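Started with a trailing &, haproxy will not survive a reboot or a crash. A minimal systemd unit is one alternative (a sketch, assuming the build paths used above; -db keeps haproxy in the foreground so systemd can supervise it):

cat <<EOF > /etc/systemd/system/haproxy.service
[Unit]
Description=HAProxy load balancer for kube-apiserver
After=network.target

[Service]
ExecStartPre=/usr/local/haproxy/sbin/haproxy -c -f /usr/local/haproxy/conf/haproxy.cfg
ExecStart=/usr/local/haproxy/sbin/haproxy -db -f /usr/local/haproxy/conf/haproxy.cfg
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload && systemctl enable haproxy && systemctl start haproxy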

21. Join k8s-n01 to the cluster

## This entry points at the haproxy node; haproxy runs locally on k8s-n01, hence 127.0.0.1.

echo '127.0.0.1 apiserver.k8s.local' >> /etc/hosts

kubeadm join apiserver.k8s.local:6443 --token 259pgq.fjrb8zpx3uzbsx8w \
    --discovery-token-ca-cert-hash sha256:1f52834e5713cc55a2bf9516aa32246ac167d042270be2041aba491cd665b908

22. Verify the result

[root@k8s-m01 k8s]# kubectl get no -o wide

NAME      STATUS   ROLES    AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME

k8s-m01   Ready    master   20h   v1.17.3   192.168.1.171   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.7

k8s-m02   Ready    master   20h   v1.17.3   192.168.1.172   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.7

k8s-m03   Ready    master   20h   v1.17.3   192.168.1.173   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.7

k8s-n01   Ready    <none>   55m   v1.17.3   192.168.1.174   <none>        CentOS Linux 7 (Core)   3.10.0-1062.12.1.el7.x86_64   docker://19.3.7
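kube-proxy was configured in ipvs mode in kubeadm-config.yaml; two quick checks that it actually took effect (a sketch):

ipvsadm -Ln | head
kubectl -n kube-system get cm kube-proxy -o yaml | grep "mode:"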
