
1.3 K8s Deployment: Binary High Availability

Hands-on binary deployment of a highly available Kubernetes cluster

For the automated installation scripts and other files accompanying this document, see https://github.com/ljzsdut/k8sbin-shell.git

0. Preparation

Run the following on every master and node: set the hostname and IP address, update the hosts file, stop the firewall, disable SELinux, disable swap, and configure time synchronization.

nmcli connection modify eth0 ipv4.addresses "192.168.5.181/24" ipv4.gateway "192.168.5.1" ipv4.dns "192.168.5.1" ipv4.method manual connection.autoconnect yes
nmcli dev connect eth0

setenforce 0 \
&& hostnamectl set-hostname k8s-node01 \
&& exec bash

cat >/etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.5.181 k8s-master01 k8s-master01.ljzsdut.com
192.168.5.182 k8s-master02 k8s-master02.ljzsdut.com
192.168.5.183 k8s-master03 k8s-master03.ljzsdut.com
192.168.5.184 k8s-node01 k8s-node01.ljzsdut.com
EOF
systemctl stop firewalld && systemctl disable firewalld && setenforce 0 && sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux 
swapoff -a && sed -i 's/.*swap.*/#&/' /etc/fstab
modprobe br_netfilter    #the bridge-nf-call sysctls below only exist once this module is loaded
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
yum install -y ntp
cat <<EOF >/etc/ntp.conf 
driftfile /var/lib/ntp/drift
restrict default nomodify notrap nopeer noquery
restrict 127.0.0.1 
restrict ::1
restrict 192.168.5.0 mask 255.255.255.0 nomodify notrap
server 0.cn.pool.ntp.org
server 1.cn.pool.ntp.org
server 2.cn.pool.ntp.org
server 3.cn.pool.ntp.org
includefile /etc/ntp/crypto/pw
keys /etc/ntp/keys
disable monitor
EOF
systemctl restart ntpd
systemctl enable ntpd
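
To confirm that time synchronization is working, query ntpd's peer list after giving it a minute or two (a minimal check; the asterisk in the first column marks the currently selected upstream server):

ntpq -p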

1. Generate the Certificates

In this walkthrough, the certificates are generated on a machine outside the cluster:

K8S_MASTER_01=192.168.5.181@@@k8s-master01
K8S_MASTER_02=192.168.5.182@@@k8s-master02
K8S_MASTER_03=192.168.5.183@@@k8s-master03
K8S_NODE_01=192.168.5.184@@@k8s-node01
K8S_NODE_02=192.168.5.185@@@k8s-node02
VIP=192.168.5.180
SERVICE_IP="10.96.0.1"
CLUSTER_NAME=kubernetes    #referenced by the kubeconfig heredocs below
BASE_DIR=~/K8S

[ -d ${BASE_DIR}/certs ] || mkdir -p ${BASE_DIR}/certs ; cd ${BASE_DIR}/certs
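
The IP@@@hostname values above are split with bash parameter expansion throughout the config below; a quick illustration of the two expansions used:

NODE=192.168.5.181@@@k8s-master01
echo "${NODE//@@@*}"    #prints 192.168.5.181 (strips @@@ and everything after it)
echo "${NODE##*@@@}"    #prints k8s-master01  (strips everything up to the last @@@)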

cat > openssl.cnf << EOF
[ req ]
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_ca ]
basicConstraints = critical, CA:TRUE
keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign
[ v3_req_server ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
[ v3_req_client ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
[ v3_req_apiserver ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names_cluster
[ v3_req_etcd ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names_etcd
[ alt_names_cluster ]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
DNS.5 = ${K8S_MASTER_01##*@@@}
DNS.6 = ${K8S_MASTER_02##*@@@}
DNS.7 = ${K8S_MASTER_03##*@@@}
IP.1 = ${K8S_MASTER_01//@@@*}
IP.2 = ${K8S_MASTER_02//@@@*}
IP.3 = ${K8S_MASTER_03//@@@*}
IP.4 = ${SERVICE_IP}
IP.5 = ${VIP}
[ alt_names_etcd ]
DNS.1 = ${K8S_MASTER_01##*@@@}
DNS.2 = ${K8S_MASTER_02##*@@@}
DNS.3 = ${K8S_MASTER_03##*@@@}
IP.1 = ${K8S_MASTER_01//@@@*}
IP.2 = ${K8S_MASTER_02//@@@*}
IP.3 = ${K8S_MASTER_03//@@@*}
EOF

#ca
[ -d ${BASE_DIR}/certs/master-etc-kubernetes/pki ] || mkdir -p ${BASE_DIR}/certs/master-etc-kubernetes/pki ; cd ${BASE_DIR}/certs/master-etc-kubernetes/pki
openssl ecparam -name secp521r1 -genkey -noout -out ca.key
openssl req -x509 -new -sha256 -nodes -key ca.key -days 3650 -out ca.crt \
            -subj "/CN=kubernetes"  -extensions v3_ca -config ${BASE_DIR}/certs/openssl.cnf
            
#apiserver
openssl ecparam -name secp521r1 -genkey -noout -out apiserver.key
openssl req -new -sha256 -key apiserver.key -subj "/CN=kube-apiserver" \
  | openssl x509 -req -sha256 -CA ca.crt -CAkey ca.key -CAcreateserial \
                 -out apiserver.crt -days 3650 \
                 -extensions v3_req_apiserver -extfile ${BASE_DIR}/certs/openssl.cnf
                 
openssl ecparam -name secp521r1 -genkey -noout -out apiserver-kubelet-client.key
openssl req -new -key apiserver-kubelet-client.key \
            -subj "/CN=kube-apiserver-kubelet-client/O=system:masters" \
  | openssl x509 -req -sha256 -CA ca.crt -CAkey ca.key -CAcreateserial \
                 -out apiserver-kubelet-client.crt -days 3650 \
                 -extensions v3_req_client -extfile ${BASE_DIR}/certs/openssl.cnf
                 
#admin.key and admin.crt could equally be replaced by the apiserver-kubelet-client(.key|.crt) pair generated above
openssl ecparam -name secp521r1 -genkey -noout -out admin.key
openssl req -new -key admin.key -subj "/CN=kubernetes-admin/O=system:masters" \
  | openssl x509 -req -sha256 -CA ca.crt -CAkey ca.key -CAcreateserial \
                 -out admin.crt -days 3650 -extensions v3_req_client \
                 -extfile ${BASE_DIR}/certs/openssl.cnf
                 
#Generate the admin.conf file
cat > ../admin.conf << EOF
apiVersion: v1
kind: Config
clusters:
- name: ${CLUSTER_NAME}
  cluster:
    server: https://${VIP}:8443
    certificate-authority-data: $( openssl base64 -A -in ca.crt ) 
users:
- name: k8s-admin
  user:
    client-certificate-data: $( openssl base64 -A -in admin.crt ) 
    client-key-data: $( openssl base64 -A -in admin.key ) 
contexts:
- context:
    cluster: ${CLUSTER_NAME}
    user: k8s-admin
  name: k8s-admin@${CLUSTER_NAME}
current-context: k8s-admin@${CLUSTER_NAME}
EOF

rm -rf admin.key admin.crt

#controller-manager uses --service-account-private-key-file=/etc/kubernetes/pki/sa.key to point at the private key that signs Service Account tokens.
#apiserver uses --service-account-key-file=/etc/kubernetes/pki/sa.pub to point at the public key that verifies Service Account tokens.

openssl ecparam -name secp521r1 -genkey -noout -out sa.key
openssl ec -in sa.key -outform PEM -pubout -out sa.pub
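
A quick consistency check that the exported public key really matches the private key (diff prints nothing when the two agree):

openssl ec -in sa.key -pubout 2>/dev/null | diff - sa.pub && echo "sa.key and sa.pub match"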


openssl ecparam -name secp521r1 -genkey -noout -out kube-controller-manager.key
openssl req -new -sha256 -key kube-controller-manager.key \
            -subj "/CN=system:kube-controller-manager" \
  | openssl x509 -req -sha256 -CA ca.crt -CAkey ca.key -CAcreateserial \
                 -out kube-controller-manager.crt -days 3650 -extensions v3_req_client \
                 -extfile ${BASE_DIR}/certs/openssl.cnf
                 
#Generate controller-manager.conf; here we authenticate with a client certificate
cat > ../controller-manager.conf << EOF
apiVersion: v1
kind: Config
clusters:
- name: ${CLUSTER_NAME}
  cluster:
    server: https://${VIP}:6443
    certificate-authority-data: $( openssl base64 -A -in ca.crt ) 
users:
- name: system:kube-controller-manager
  user:
    client-certificate-data: $( openssl base64 -A -in kube-controller-manager.crt ) 
    client-key-data: $( openssl base64 -A -in kube-controller-manager.key ) 
contexts:
- context:
    cluster: ${CLUSTER_NAME}
    user: system:kube-controller-manager
  name: system:kube-controller-manager@${CLUSTER_NAME}
current-context: system:kube-controller-manager@${CLUSTER_NAME}
EOF

rm -rf kube-controller-manager.crt kube-controller-manager.key


openssl ecparam -name secp521r1 -genkey -noout -out kube-scheduler.key
openssl req -new -sha256 -key kube-scheduler.key \
            -subj "/CN=system:kube-scheduler" \
  | openssl x509 -req -sha256 -CA ca.crt -CAkey ca.key -CAcreateserial \
                 -out kube-scheduler.crt -days 3650 -extensions v3_req_client \
                 -extfile ${BASE_DIR}/certs/openssl.cnf
                 
#Generate the scheduler.conf file
cat > ../scheduler.conf << EOF
apiVersion: v1
kind: Config
clusters:
- name: ${CLUSTER_NAME}
  cluster:
    server: https://${VIP}:6443
    certificate-authority-data: $( openssl base64 -A -in ca.crt ) 
users:
- name: system:kube-scheduler
  user:
    client-certificate-data: $( openssl base64 -A -in kube-scheduler.crt ) 
    client-key-data: $( openssl base64 -A -in kube-scheduler.key ) 
contexts:
- context:
    cluster: ${CLUSTER_NAME}
    user: system:kube-scheduler
  name: system:kube-scheduler@${CLUSTER_NAME}
current-context: system:kube-scheduler@${CLUSTER_NAME}
EOF

rm -rf kube-scheduler.crt kube-scheduler.key


[ -d ${BASE_DIR}/certs/node-etc-kubernetes/pki ] || mkdir -p ${BASE_DIR}/certs/node-etc-kubernetes/pki
openssl ecparam -name secp521r1 -genkey -noout -out kube-proxy.key
openssl req -new -key kube-proxy.key \
            -subj "/CN=system:kube-proxy" \
  | openssl x509 -req -sha256 -CA ca.crt -CAkey ca.key -CAcreateserial \
                 -out kube-proxy.crt -days 3650 -extensions v3_req_client \
                 -extfile ${BASE_DIR}/certs/openssl.cnf
                 
#Generate the kube-proxy.conf file
cat > ${BASE_DIR}/certs/node-etc-kubernetes/kube-proxy.conf << EOF
apiVersion: v1
kind: Config
clusters:
- name: ${CLUSTER_NAME}
  cluster:
    server: https://${VIP}:8443
    certificate-authority-data: $( openssl base64 -A -in ca.crt ) 
users:
- name: system:kube-proxy
  user:
    client-certificate-data: $( openssl base64 -A -in kube-proxy.crt ) 
    client-key-data: $( openssl base64 -A -in kube-proxy.key ) 
contexts:
- context:
    cluster: ${CLUSTER_NAME}
    user: system:kube-proxy
  name: system:kube-proxy@${CLUSTER_NAME}
current-context: system:kube-proxy@${CLUSTER_NAME}
EOF

rm -rf kube-proxy.crt kube-proxy.key
cp -rf ca.crt ${BASE_DIR}/certs/node-etc-kubernetes/pki



#BootStrap Token
VIP=192.168.5.180
CLUSTER_NAME=kubernetes
BOOTSTRAP_TOKEN="$(head -c 6 /dev/urandom | md5sum | head -c 6).$(head -c 16 /dev/urandom | md5sum | head -c 16)"
echo "$BOOTSTRAP_TOKEN,\"system:bootstrapper\",10001,\"system:bootstrappers\"" > ${BASE_DIR}/certs/master-etc-kubernetes/token.csv

#Generate the bootstrap.conf file
cat > ${BASE_DIR}/certs/node-etc-kubernetes/bootstrap.conf << EOF
apiVersion: v1
kind: Config
clusters:
- name: ${CLUSTER_NAME}
  cluster:
    server: https://${VIP}:8443
    certificate-authority-data: $( openssl base64 -A -in ca.crt ) 
users:
- name: system:bootstrapper
  user:
    token: ${BOOTSTRAP_TOKEN}
contexts:
- context:
    cluster: ${CLUSTER_NAME}
    user: system:bootstrapper
  name: system:bootstrapper@${CLUSTER_NAME}
current-context: system:bootstrapper@${CLUSTER_NAME}
EOF


openssl ecparam -name secp521r1 -genkey -noout -out front-proxy-ca.key
openssl req -x509 -new -sha256 -nodes -key front-proxy-ca.key -days 3650 \
            -out front-proxy-ca.crt -subj "/CN=front-proxy-ca" \
            -extensions v3_ca -config ${BASE_DIR}/certs/openssl.cnf
            
openssl ecparam -name secp521r1 -genkey -noout -out front-proxy-client.key
openssl req -new -sha256 -key front-proxy-client.key \
            -subj "/CN=front-proxy-client" \
  | openssl x509 -req -sha256 -CA front-proxy-ca.crt \
                 -CAkey front-proxy-ca.key -CAcreateserial \
                 -out front-proxy-client.crt -days 3650 \
                 -extensions v3_req_client -extfile ${BASE_DIR}/certs/openssl.cnf
                 
[ -d ${BASE_DIR}/certs/etc-etcd/pki ] || mkdir -p ${BASE_DIR}/certs/etc-etcd/pki ; cd ${BASE_DIR}/certs/etc-etcd/pki
openssl ecparam -name secp521r1 -genkey -noout -out ca.key
openssl req -x509 -new -sha256 -nodes -key ca.key -days 3650 \
            -out ca.crt -subj "/CN=etcd-ca" -extensions v3_ca \
            -config ${BASE_DIR}/certs/openssl.cnf
            
            
openssl ecparam -name secp521r1 -genkey -noout -out ${BASE_DIR}/certs/master-etc-kubernetes/pki/apiserver-etcd-client.key
openssl req -new -sha256 -key ${BASE_DIR}/certs/master-etc-kubernetes/pki/apiserver-etcd-client.key -subj "/CN=etcd-client" \
  | openssl x509 -req -sha256 -CA ca.crt -CAkey ca.key \
                 -CAcreateserial -out ${BASE_DIR}/certs/master-etc-kubernetes/pki/apiserver-etcd-client.crt -days 3650 \
                 -extensions v3_req_client -extfile ${BASE_DIR}/certs/openssl.cnf
                 
openssl ecparam -name secp521r1 -genkey -noout -out server.key
openssl req -new -sha256 -key server.key -subj "/CN=etcd-server" \
  | openssl x509 -req -sha256 -CA ca.crt -CAkey ca.key \
                 -CAcreateserial -out server.crt -days 3650 \
                 -extensions v3_req_etcd -extfile ${BASE_DIR}/certs/openssl.cnf
                 
openssl ecparam -name secp521r1 -genkey -noout -out peer.key
openssl req -new -sha256 -key peer.key -subj "/CN=etcd-peer" \
  | openssl x509 -req -sha256 -CA ca.crt -CAkey ca.key \
                 -CAcreateserial -out peer.crt -days 3650 \
                 -extensions v3_req_etcd -extfile ${BASE_DIR}/certs/openssl.cnf                 
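
Before distributing the certificates, it is worth verifying that each one chains to the intended CA and that the apiserver certificate carries the expected SANs (a sketch, run from ${BASE_DIR}/certs):

cd ${BASE_DIR}/certs
openssl verify -CAfile master-etc-kubernetes/pki/ca.crt \
               master-etc-kubernetes/pki/apiserver.crt \
               master-etc-kubernetes/pki/apiserver-kubelet-client.crt
openssl verify -CAfile etc-etcd/pki/ca.crt etc-etcd/pki/server.crt etc-etcd/pki/peer.crt
#the apiserver SANs should include the master hostnames and IPs, ${SERVICE_IP} and ${VIP}
openssl x509 -noout -text -in master-etc-kubernetes/pki/apiserver.crt | grep -A1 "Subject Alternative Name"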

2. Deploy the etcd Cluster

Install etcd with yum on all three nodes

[root@k8s-master01 ~]# yum install -y etcd

Copy the etcd certificates and etcd.conf, then adjust the etcd.conf settings (required on all three nodes)

#Copy the configuration file
[root@k8s-master01 ~]# scp -rp 192.168.5.150:k8s-bin-inst/etcd/etcd.conf /etc/etcd/
root@192.168.5.150's password: 
etcd.conf                                                      100% 1985     1.5MB/s   00:00    
[root@k8s-master01 ~]# vim /etc/etcd/etcd.conf  #the main settings to change (values shown for k8s-master01):
ETCD_LISTEN_PEER_URLS="https://192.168.5.181:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.5.181:2379"
ETCD_NAME="k8s-master01"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.5.181:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.5.181:2379"
ETCD_INITIAL_CLUSTER="k8s-master01=https://192.168.5.181:2380,k8s-master02=https://192.168.5.182:2380,k8s-master03=https://192.168.5.183:2380"

Alternatively, make the same changes with the following commands:
K8S_MASTER_01=192.168.5.181@k8s-master01
K8S_MASTER_02=192.168.5.182@k8s-master02
K8S_MASTER_03=192.168.5.183@k8s-master03

LOCAL_IP=`ip addr |grep "inet "|grep -v "127.0.0.1"|awk '{print $2}'|awk -F"/" '{print $1}'|head -1`
sed -i 's/ETCD_LISTEN_PEER_URLS=.*/ETCD_LISTEN_PEER_URLS="https:\/\/'${LOCAL_IP}':2380"/g' /etc/etcd/etcd.conf
sed -i 's/ETCD_LISTEN_CLIENT_URLS=.*/ETCD_LISTEN_CLIENT_URLS="https:\/\/'${LOCAL_IP}':2379"/g' /etc/etcd/etcd.conf
sed -i 's/ETCD_NAME=.*/ETCD_NAME="'${HOSTNAME}'"/g' /etc/etcd/etcd.conf
sed -i 's/ETCD_INITIAL_ADVERTISE_PEER_URLS=.*/ETCD_INITIAL_ADVERTISE_PEER_URLS="https:\/\/'${LOCAL_IP}':2380"/g' /etc/etcd/etcd.conf
sed -i 's/ETCD_ADVERTISE_CLIENT_URLS=.*/ETCD_ADVERTISE_CLIENT_URLS="https:\/\/'${LOCAL_IP}':2379"/g' /etc/etcd/etcd.conf
sed -i 's/ETCD_INITIAL_CLUSTER=.*/ETCD_INITIAL_CLUSTER="k8s-master01=https:\/\/'${K8S_MASTER_01//@*}':2380,k8s-master02=https:\/\/'${K8S_MASTER_02//@*}':2380,k8s-master03=https:\/\/'${K8S_MASTER_03//@*}':2380"/g' /etc/etcd/etcd.conf
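
A quick grep confirms the substitutions landed as intended:

grep -E "^ETCD_(NAME|LISTEN_PEER_URLS|LISTEN_CLIENT_URLS|INITIAL_ADVERTISE_PEER_URLS|ADVERTISE_CLIENT_URLS|INITIAL_CLUSTER)=" /etc/etcd/etcd.conf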

#Copy the certificate files
[root@k8s-master01 ~]# scp -rp 192.168.5.150:/root/k8s-certs-generator/etcd/* /etc/etcd/
root@192.168.5.150's password: 
etcd-client-cert.patch                                         100% 4464   251.3KB/s   00:00    
ca.key                                                         100% 3243     1.0MB/s   00:00    
ca.crt                                                         100% 1814     1.1MB/s   00:00    
peer.key                                                       100% 1679   966.2KB/s   00:00    
server.key                                                     100% 1679   974.6KB/s   00:00    
apiserver-etcd-client.key                                      100% 1675     1.0MB/s   00:00    
client.key                                                     100% 1675   943.5KB/s   00:00    
peer.crt                                                       100% 1692     1.0MB/s   00:00    
server.crt                                                     100% 1679     1.0MB/s   00:00    
apiserver-etcd-client.crt                                      100% 1570   943.3KB/s   00:00    
client.crt                                                     100% 1570   980.5KB/s   00:00    

#Start etcd. Start all three nodes at roughly the same time so they can elect a leader
[root@k8s-master01 ~]# systemctl daemon-reload && systemctl start etcd ; systemctl status etcd

#Verify the deployment succeeded
[root@k8s-master01 ~]# etcdctl --key-file=/etc/etcd/pki/peer.key --cert-file=/etc/etcd/pki/peer.crt --ca-file=/etc/etcd/pki/ca.crt --endpoints="https://192.168.5.181:2379" cluster-health
member f5c2deba5941f1b is healthy: got healthy result from https://192.168.5.181:2379
member df4577fda09d46f6 is healthy: got healthy result from https://192.168.5.183:2379
member ed252f810db765a4 is healthy: got healthy result from https://192.168.5.182:2379
cluster is healthy
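
If the installed etcdctl supports the v3 API, the same health check can be run against all three endpoints at once (note that the v3 flag names differ from the v2 ones above):

ETCDCTL_API=3 etcdctl --cacert=/etc/etcd/pki/ca.crt \
    --cert=/etc/etcd/pki/peer.crt --key=/etc/etcd/pki/peer.key \
    --endpoints="https://192.168.5.181:2379,https://192.168.5.182:2379,https://192.168.5.183:2379" \
    endpoint health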

3. Deploy the apiserver Cluster

Download the binaries

[root@k8s-master01 ~]# scp 192.168.5.150:/tmp/kubernetes-server-linux-amd64.tar.gz  /tmp
[root@k8s-master01 ~]# tar xf /tmp/kubernetes-server-linux-amd64.tar.gz -C /usr/local/
[root@k8s-master01 ~]# echo 'export PATH=/usr/local/kubernetes/server/bin:$PATH' >>/etc/profile
[root@k8s-master01 ~]# source /etc/profile

Copy the configuration files, certificates, and unit files

[root@k8s-master01 ~]# mkdir /etc/kubernetes
#Copy the certificates
[root@k8s-master01 ~]# scp -rp 192.168.5.150:/root/k8s-certs-generator/kubernetes/k8s-master01/* /etc/kubernetes/ 
root@192.168.5.150's password: 
controller-manager.conf                                        100% 7135   257.1KB/s   00:00    
scheduler.conf                                                 100% 7087     3.4MB/s   00:00    
admin.conf                                                     100% 7031     3.9MB/s   00:00    
apiserver.key                                                  100% 1679   117.6KB/s   00:00    
kube-controller-manager.key                                    100% 1675     1.0MB/s   00:00    
kube-scheduler.key                                             100% 1679   906.3KB/s   00:00    
apiserver-kubelet-client.key                                   100% 1675   778.7KB/s   00:00    
apiserver.crt                                                  100% 1874     1.1MB/s   00:00    
kube-controller-manager.crt                                    100% 1667   923.5KB/s   00:00    
kube-scheduler.crt                                             100% 1655   953.8KB/s   00:00    
apiserver-kubelet-client.crt                                   100% 1651   952.2KB/s   00:00    
ca.crt                                                         100% 1663   867.9KB/s   00:00    
ca.key                                                         100% 3247     2.0MB/s   00:00    
front-proxy-ca.crt                                             100%  989   369.0KB/s   00:00    
front-proxy-ca.key                                             100% 1679   937.1KB/s   00:00    
front-proxy-client.crt                                         100% 1277   775.6KB/s   00:00    
front-proxy-client.key                                         100% 1679   951.2KB/s   00:00    
sa.key                                                         100%  365   215.7KB/s   00:00    
sa.pub                                                         100%  268   171.7KB/s   00:00    
apiserver-etcd-client.key                                      100% 1675   919.9KB/s   00:00    
apiserver-etcd-client.crt                                      100% 1570   915.9KB/s   00:00    
token.csv                                                      100%   75    49.2KB/s   00:00    


#Copy the config files and make the corresponding changes
[root@k8s-master01 ~]# scp -rp 192.168.5.150:/root/k8s-bin-inst/master/etc/kubernetes/* /etc/kubernetes/
root@192.168.5.150's password: 
apiserver                                                      100% 1926   852.3KB/s   00:00    
config                                                         100%  538   351.4KB/s   00:00    
controller-manager                                             100% 1018   639.4KB/s   00:00    
scheduler                                                      100%  211   128.9KB/s   00:00    
[root@k8s-master01 ~]# vim /etc/kubernetes/apiserver 
KUBE_ETCD_SERVERS="--etcd-servers=https://192.168.5.181:2379,https://192.168.5.182:2379,https://192.168.5.183:2379"

Or run the following commands:
K8S_MASTER_01=192.168.5.181@k8s-master01
K8S_MASTER_02=192.168.5.182@k8s-master02
K8S_MASTER_03=192.168.5.183@k8s-master03
sed -i 's/KUBE_ETCD_SERVERS=.*/KUBE_ETCD_SERVERS="--etcd-servers=https:\/\/'${K8S_MASTER_01//@*}':2379,https:\/\/'${K8S_MASTER_02//@*}':2379,https:\/\/'${K8S_MASTER_03//@*}':2379"/g' /etc/kubernetes/apiserver 


#Copy the unit files
[root@k8s-master01 ~]# scp -rp 192.168.5.150:/root/k8s-bin-inst/master/unit-files/* /usr/lib/systemd/system
root@192.168.5.150's password: 
kube-apiserver.service                                         100%  644   868.3KB/s   00:00    
kube-controller-manager.service                                100%  473   567.0KB/s   00:00    
kube-scheduler.service                                         100%  444   579.4KB/s   00:00    

Run the apiserver as the kube user: the unit file sets the run-as user with User=kube

[root@k8s-master01 ~]# useradd -r kube  
[root@k8s-master01 ~]# mkdir /var/run/kubernetes
[root@k8s-master01 ~]# chown kube:kube /var/run/kubernetes
[root@k8s-master01 ~]# systemctl daemon-reload
[root@k8s-master01 ~]# systemctl start kube-apiserver

Verify the configuration works

[root@k8s-master01 ~]# mkdir ~/.kube
[root@k8s-master01 ~]# cp -rp /etc/kubernetes/auth/admin.conf ~/.kube/config
[root@k8s-master01 ~]# kubectl get nodes
No resources found.
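
An empty node list is expected here: no kubelet has registered yet. The apiserver itself can also be probed through its health endpoint (a minimal check):

kubectl get --raw='/healthz'    #prints "ok" when the apiserver is healthy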

Configure master02 and master03 the same way as master01.

Deploy the load balancer

Here we use the haproxy+keepalived combination; an nginx+keepalived combination would also work.

Install haproxy on all three masters: it listens on 8443 with the apiservers on 6443 as backends

[root@k8s-master01 ~]# yum install -y haproxy
#replace the configuration file as follows (run on each master)
K8S_MASTER_01=192.168.5.181@k8s-master01
K8S_MASTER_02=192.168.5.182@k8s-master02
K8S_MASTER_03=192.168.5.183@k8s-master03
cat >/etc/haproxy/haproxy.cfg <<EOF
global
        log /dev/log    local0
        log /dev/log    local1 notice
        chroot /var/lib/haproxy
        stats socket /run/haproxy.sock mode 660 level admin
        stats timeout 30s
        user haproxy
        group haproxy
        daemon
        nbproc 1

defaults
        log     global
        timeout connect 5000
        timeout client  10m
        timeout server  10m

listen kube-master
        bind 0.0.0.0:8443
        mode tcp
        option tcplog
        balance roundrobin 
        server ${K8S_MASTER_01##*@} ${K8S_MASTER_01//@*}:6443 check inter 2000 fall 2 rise 2 weight 1
        server ${K8S_MASTER_02##*@} ${K8S_MASTER_02//@*}:6443 check inter 2000 fall 2 rise 2 weight 1
        server ${K8S_MASTER_03##*@} ${K8S_MASTER_03//@*}:6443 check inter 2000 fall 2 rise 2 weight 1
EOF

#Start haproxy
[root@k8s-master01 ~]# systemctl start haproxy
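
A quick check of the proxy chain before keepalived is in place (run on the master itself):

ss -lnt | grep 8443                       #haproxy should be listening on 8443
curl -k https://127.0.0.1:8443/version    #even a 401/403 JSON reply proves traffic reaches an apiserver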

Install keepalived on all three masters

[root@k8s-master01 ~]# yum install -y psmisc        #provides the killall command
[root@k8s-master01 ~]# yum install -y keepalived

#Edit the configuration file
[root@k8s-master01 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    router_id kube-master
}

vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -60
}

vrrp_instance VI-kube-master {
    state  MASTER                       #MASTER on one node, BACKUP on the others
    priority 120                        #keep node priorities within 60 of each other, to match "weight -60" above
    unicast_src_ip 192.168.5.181        #IP of this node
    unicast_peer {
        192.168.5.182                   #IPs of the other nodes
        192.168.5.183
    }
    dont_track_primary
    interface eth0                      #NIC to bind
    virtual_router_id 111               #must be identical on every node; identifies one VRRP group (this example uses unicast)
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.5.180                   #the VIP
    }
}

#Start keepalived
[root@k8s-master01 ~]# systemctl start keepalived
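
To verify the VIP, check that it appears on exactly one master (and, optionally, that stopping haproxy on that master moves it to another node):

ip addr show eth0 | grep 192.168.5.180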

4. Deploy the Controller-manager Cluster

#Review the configuration file
[root@k8s-master01 ~]# vim /etc/kubernetes/controller-manager
Note: set --cluster-cidr=10.244.0.0/16 to match your network plugin; different plugins expect different values.
[root@k8s-master01 ~]# systemctl start kube-controller-manager

5. Deploy the scheduler Cluster

#Review the configuration file
[root@k8s-master01 ~]# vim /etc/kubernetes/scheduler 
[root@k8s-master01 ~]# systemctl start kube-scheduler
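
With the apiserver, controller-manager, and scheduler all running, their health can be summarized in one command (componentstatuses is still available in the Kubernetes versions this guide targets):

kubectl get componentstatuses    #scheduler, controller-manager and the three etcd members should report Healthy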

6. Deploy the Worker Nodes

Install docker-ce

cd /etc/yum.repos.d/
curl -O https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
cat <<-EOF >kubernetes.repo
[Kubernetes]
name=kubernetes repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
enabled=1
EOF
yum repolist #verify the repositories: if both repos configured above appear in the output, the configuration is correct
yum install -y docker-ce

[root@k8s-node01 ~]# vi /usr/lib/systemd/system/docker.service  #optional
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
#add the following two proxy settings
Environment="HTTPS_PROXY=http://www.ik8s.io:10081"
Environment="NO_PROXY=127.0.0.0/8,192.168.5.0/24"
[root@k8s-node01 ~]# systemctl daemon-reload && systemctl start docker  && systemctl enable docker  
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
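
Editing the packaged unit file works, but a package update will overwrite it; the same proxy settings survive updates when placed in a systemd drop-in instead (a sketch):

mkdir -p /etc/systemd/system/docker.service.d
cat >/etc/systemd/system/docker.service.d/proxy.conf <<EOF
[Service]
Environment="HTTPS_PROXY=http://www.ik8s.io:10081"
Environment="NO_PROXY=127.0.0.0/8,192.168.5.0/24"
EOF
systemctl daemon-reload && systemctl restart docker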

#Verify docker deployed successfully
[root@k8s-node01 ~]# docker info
Containers: 10
 Running: 6
 Paused: 0
 Stopped: 4
Images: 3
Server Version: 18.09.0
Storage Driver: overlay2
 Backing Filesystem: xfs
 Supports d_type: true
 Native Overlay Diff: true
Logging Driver: json-file
Cgroup Driver: cgroupfs
Plugins:
 Volume: local
 Network: bridge host macvlan null overlay
 Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
Swarm: inactive
Runtimes: runc
Default Runtime: runc
Init Binary: docker-init
containerd version: c4446665cb9c30056f4998ed953e6d4ff22c7c39
runc version: 4fc53a81fb7c994640722ac585fa9ca548971871
init version: fec3683
Security Options:
 seccomp
  Profile: default
Kernel Version: 3.10.0-957.el7.x86_64
Operating System: CentOS Linux 7 (Core)
OSType: linux
Architecture: x86_64
CPUs: 4
Total Memory: 7.637GiB
Name: k8s-node01
ID: 4T55:T464:XATA:CNSP:SYCT:6XFA:MS5D:GVAF:QWH4:DX5Q:63OM:OF72
Docker Root Dir: /var/lib/docker
Debug Mode (client): false
Debug Mode (server): false
HTTPS Proxy: http://www.ik8s.io:10081
No Proxy: 127.0.0.0/8,192.168.5.0/24
Registry: https://index.docker.io/v1/
Labels:
Experimental: false
Insecure Registries:
 127.0.0.0/8
Live Restore Enabled: false
Product License: Community Engine

Download the binaries

#Download the kubelet (node) binaries
[root@k8s-node01 ~]# scp -rp 192.168.5.150:/tmp/kubernetes-node-linux-amd64.tar.gz /tmp
root@192.168.5.150's password: 
kubernetes-node-linux-amd64.tar.gz            100%   87MB  39.7MB/s   00:02 
[root@k8s-node01 ~]# tar xf /tmp/kubernetes-node-linux-amd64.tar.gz -C /usr/local/
[root@k8s-node01 ~]# echo 'export PATH=/usr/local/kubernetes/node/bin:$PATH' >>/etc/profile && source /etc/profile

#Download the CNI plugins
[root@k8s-node01 ~]#  scp -rp -o StrictHostKeyChecking=no 192.168.5.150:/tmp/cni-plugins-amd64-v0.7.4.tgz /tmp

Upstream download URL: https://github.com/containernetworking/plugins/releases/download/v0.7.4/cni-plugins-amd64-v0.7.4.tgz
[root@k8s-node01 ~]# mkdir -p /opt/cni/bin && tar xf /tmp/cni-plugins-amd64-v0.7.4.tgz -C /opt/cni/bin/

Copy the files:

[root@k8s-node01 ~]# mkdir /etc/kubernetes
#Copy the certificates
[root@k8s-node01 ~]# scp -rp 192.168.5.150:/root/k8s-certs-generator/kubernetes/kubelet/* /etc/kubernetes/
root@192.168.5.150's password: 
kube-proxy.conf                               100% 6984     4.8MB/s   00:00    
bootstrap.conf                                100% 2601     2.4MB/s   00:00    
kube-proxy.key                                100% 1675     1.8MB/s   00:00    
kube-proxy.crt                                100% 1602     1.8MB/s   00:00    
ca.crt                                        100% 1663     1.9MB/s   00:00    

#Config files: config is shared; proxy and kubelet are the per-component files
[root@k8s-node01 ~]# scp -rp 192.168.5.150:/root/k8s-bin-inst/nodes/etc/kubernetes/* /etc/kubernetes/
root@192.168.5.150's password: 
config                                        100%  538   552.1KB/s   00:00    
kubelet                                       100%  446   577.7KB/s   00:00    
proxy                                         100%  106   126.4KB/s   00:00    

#Unit files
[root@k8s-node01 ~]# scp -rp 192.168.5.150:/root/k8s-bin-inst/nodes/unit-files/* /usr/lib/systemd/system/
root@192.168.5.150's password: 
kubelet.service                               100%  607   322.7KB/s   00:00    
kube-proxy.service                            100%  442   273.2KB/s   00:00    

#YAML config files referenced by each component's --config option
[root@k8s-node01 ~]# scp -rp 192.168.5.150:/root/k8s-bin-inst/nodes/var/lib/* /var/lib/
root@192.168.5.150's password: 
config.yaml                                   100% 1713   660.4KB/s   00:00    
config.yaml                                   100%  785    54.7KB/s   00:00    

Create a clusterrolebinding used to bootstrap the nodes. However many nodes you deploy, this only needs to be created once.

[root@k8s-master01 ~]# kubectl create clusterrolebinding system:bootstrapper --user=system:bootstrapper --clusterrole=system:node-bootstrapper
clusterrolebinding.rbac.authorization.k8s.io/system:bootstrapper created

(this alternative may not work)
[root@k8s-master01 ~]# kubectl create clusterrolebinding system:bootstrapper --group=system:bootstrappers --clusterrole=system:bootstrapper

Load the ipvs modules (optional; kube-proxy seems to load the ipvs modules automatically at startup)

#Create the /etc/sysconfig/modules/ipvs.modules file
[root@k8s-node01 ~]# vim /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir | grep -o "^[^.]*");do
   /sbin/modinfo -F filename $i &> /dev/null
  if [ $? -eq 0 ];then
    /sbin/modprobe $i
  fi
done
[root@k8s-node01 ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
[root@k8s-node01 ~]# bash /etc/sysconfig/modules/ipvs.modules
#Check the loaded ipvs modules
[root@k8s-node01 ~]# lsmod | grep ip_vs
ip_vs_wlc              12519  0 
ip_vs_sed              12519  0 
ip_vs_pe_sip           12740  0 
nf_conntrack_sip       33860  1 ip_vs_pe_sip
ip_vs_nq               12516  0 
ip_vs_lc               12516  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_ftp              13079  0 
ip_vs_dh               12688  0 
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  1 
ip_vs                 141473  25 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_pe_sip,ip_vs_lblcr,ip_vs_lblc
nf_nat                 26787  3 ip_vs_ftp,nf_nat_ipv4,nf_nat_masquerade_ipv4
nf_conntrack          133053  8 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_sip,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
The modules loaded successfully.

Start kubelet and kube-proxy

[root@k8s-node01 ~]# mkdir -p /etc/kubernetes/manifests  #kubelet logs errors if this directory is missing, though it still works
[root@k8s-node01 ~]# systemctl daemon-reload
[root@k8s-node01 ~]# systemctl start kubelet
[root@k8s-node01 ~]# systemctl start kube-proxy
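
Once kube-proxy is up in ipvs mode, the virtual servers it programs can be inspected (assuming the ipvsadm package is installed):

yum install -y ipvsadm
ipvsadm -Ln    #e.g. the 10.96.0.1:443 service should list the three apiservers as real servers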

Have the apiserver sign the node's certificate: Pending -> Approved. A kubeconfig file is then generated for the node automatically from the bootstrap kubeconfig file.

[root@k8s-master01 ~]# kubectl get csr
NAME                                                   AGE   REQUESTOR             CONDITION
node-csr-IuDMCkY3K2nXM3mVX-a_rajiT1q32yyqpuhi8fAalac   42s   system:bootstrapper   Pending
[root@k8s-master01 ~]# kubectl certificate approve node-csr-IuDMCkY3K2nXM3mVX-a_rajiT1q32yyqpuhi8fAalac
certificatesigningrequest.certificates.k8s.io/node-csr-IuDMCkY3K2nXM3mVX-a_rajiT1q32yyqpuhi8fAalac approved
[root@k8s-master01 ~]# kubectl get csr
NAME                                                   AGE   REQUESTOR             CONDITION
node-csr-IuDMCkY3K2nXM3mVX-a_rajiT1q32yyqpuhi8fAalac   98s   system:bootstrapper   Approved,Issued

Note: once the CSR is approved, a kubeconfig file is generated for the kubelet automatically: --kubeconfig=/etc/kubernetes/auth/kubelet.conf

7. Deploy Add-ons: flannel and CoreDNS

Pods cannot communicate with each other until a network plugin is installed, so install one before deploying any applications; CoreDNS likewise only starts after the network plugin is in place.

#Deploy flannel
[root@k8s-master01 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

#Deploy CoreDNS
[root@k8s-master01 ~]# mkdir coredns && cd coredns
[root@k8s-master01 coredns]# curl -O https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
[root@k8s-master01 coredns]# curl -O https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
[root@k8s-master01 coredns]# bash deploy.sh -i 10.96.0.10 -r "10.96.0.0/12" -s -t coredns.yaml.sed | kubectl apply -f -
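
To verify the add-ons, wait for the flannel and CoreDNS pods to reach Running, then resolve a service name from a throwaway pod:

kubectl get pods -n kube-system -o wide
kubectl run -it --rm --restart=Never dnstest --image=busybox:1.28 -- nslookup kubernetes.default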

8. Other Optional Steps

Command completion

yum install -y bash-completion #installs /usr/share/bash-completion/bash_completion
source /usr/share/bash-completion/bash_completion
echo 'source <(kubectl completion bash)' >>/etc/profile && source /etc/profile

docker-tag

Usage examples: docker-tag ljzsdut/flannel, docker-tag library/centos

yum install -y epel-release
yum install -y jq
cat >/usr/local/bin/docker-tag<<EOF
#!/bin/bash
curl -s -S "https://registry.hub.docker.com/v2/repositories/\$1/tags/" | jq '."results"[]["name"]' |sort
EOF
chmod +x /usr/local/bin/docker-tag