Deploying a Highly Available Kubernetes Cluster with kubeadm (v1.14.0)

Published: 2020-07-29 | Author: FJCA | Category: Cloud Computing

I. Cluster Planning

Hostname       IP            Role     Main components / purpose
VIP            172.16.1.10   -        virtual IP providing master high availability and load balancing
k8s-master01   172.16.1.11   master   kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kube-flannel, etcd
k8s-master02   172.16.1.12   master   kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kube-flannel, etcd
k8s-master03   172.16.1.13   master   kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kube-flannel, etcd
k8s-node01     172.16.1.21   node     kubelet, kube-proxy, kube-flannel
k8s-node02     172.16.1.22   node     kubelet, kube-proxy, kube-flannel

The master nodes achieve high availability and load balancing through keepalived and haproxy. On cloud hosts you can use the corresponding managed products directly, such as Alibaba Cloud SLB or Tencent Cloud CLB.

II. Preparation

Perform the following preparation on all nodes.

1. Hardware requirements

At least 2 CPUs and 2 GB of RAM per node are recommended. This is not a hard requirement; a cluster can be built with 1 CPU and 1 GB, but kubeadm will print warnings during deployment:

# With only 1 CPU, initializing the master reports:
 [WARNING NumCPU]: the number of available CPUs 1 is less than the required 2
# Deploying add-ons or pods may report:
Warning FailedScheduling: Insufficient cpu, Insufficient memory

2. Kernel parameters

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
# use physical memory as much as possible before falling back to swap
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF
sysctl --system

3. Disable swap

Since Kubernetes 1.8, swap must be disabled; otherwise kubelet will not start with the default configuration.

# Disable temporarily
swapoff -a
# Disable permanently
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

4. Enable IPVS

modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
# Verify that the modules are loaded
lsmod | grep ip_vs
# Load the modules automatically at boot
cat <<EOF>> /etc/rc.local
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod +x /etc/rc.d/rc.local

5. Disable SELinux

# Disable temporarily
setenforce 0
# Disable permanently
sed -ri 's/(SELINUX=)[a-z]*/\1disabled/' /etc/selinux/config

6. Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

7. Install Docker

# Add the docker-ce yum repository
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 
# Add the CentOS base repository and EPEL
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo 
yum -y install epel-release
# Install and start Docker
yum -y install docker-ce
docker version
systemctl start docker
systemctl enable docker

Note: the command above does not pin a docker-ce version, so the latest release is installed. To install a specific version:

# List available docker-ce versions
yum list docker-ce --showduplicates
# Install a specific version
yum -y install docker-ce-<VERSION_STRING>

8. Other

Configure passwordless SSH login, /etc/hosts entries, and NTP time synchronization on all nodes, as sketched below.
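A minimal sketch of these steps, run from k8s-master01 (hostnames and IPs follow the planning table above; adjust them to your environment):

# Passwordless SSH: generate a key on master01 and push it to every node
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
for ip in 172.16.1.11 172.16.1.12 172.16.1.13 172.16.1.21 172.16.1.22; do
  ssh-copy-id root@$ip
done
# Hostname resolution: append the cluster hosts on every node
cat <<EOF >> /etc/hosts
172.16.1.11 k8s-master01
172.16.1.12 k8s-master02
172.16.1.13 k8s-master03
172.16.1.21 k8s-node01
172.16.1.22 k8s-node02
EOF
# NTP time synchronization (using chrony)
yum -y install chrony
systemctl enable --now chronyd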

III. Install and Configure keepalived and haproxy

Run the following on all three master nodes.

1. Installation

yum install -y socat keepalived haproxy ipvsadm
systemctl enable haproxy
systemctl enable keepalived

2. Configuration

haproxy configuration file:

#/etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local3
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     32768
    user        haproxy
    group       haproxy
    daemon
    nbproc      1
    stats socket /var/lib/haproxy/stats

defaults
    mode                    tcp
    log                     global
    option                  tcplog
    option                  dontlognull
    option                  redispatch
    retries                 3
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout check           10s

listen stats
    mode   http
    bind :8888
    stats   enable
    stats   uri     /admin?stats
    stats   auth    admin:admin
    stats   admin   if TRUE

frontend  k8s_https *:8443
    mode      tcp
    maxconn      2000
    default_backend     https_sri

backend https_sri
    balance      roundrobin
    server master1-api 172.16.1.11:6443  check inter 10000 fall 2 rise 2 weight 1
    server master2-api 172.16.1.12:6443  check inter 10000 fall 2 rise 2 weight 1
    server master3-api 172.16.1.13:6443  check inter 10000 fall 2 rise 2 weight 1

keepalived configuration file (the master01 version; differences on the other masters are noted below):

#/etc/keepalived/keepalived.conf
global_defs {
   router_id master01
}

vrrp_script check_haproxy {
    script /etc/keepalived/check_haproxy.sh
    interval 3
}

vrrp_instance VI_1 {
    state MASTER
    interface ens192
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.16.1.10/24
    }
    track_script {   
        check_haproxy
    }
}

#/etc/keepalived/check_haproxy.sh
#!/bin/bash
NUM=`ps -C haproxy --no-header |wc -l`
if [ $NUM -eq 0 ];then
    systemctl stop keepalived
fi

Note that the keepalived configuration differs across the three master nodes:
router_id is master01, master02, and master03 respectively
state is MASTER, BACKUP, and BACKUP respectively
priority is 100, 90, and 80 respectively
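For example, starting from the master01 file, the master02 copy could be derived like this (a sketch; on master03 use router_id master03 and priority 80):

# On k8s-master02: adjust only the fields that differ from master01
sed -i -e 's/router_id master01/router_id master02/' \
       -e 's/state MASTER/state BACKUP/' \
       -e 's/priority 100/priority 90/' /etc/keepalived/keepalived.conf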

IV. Kubernetes Cluster Deployment

1. Install kubeadm, kubelet, and kubectl

Install kubeadm, kubelet, and kubectl on all nodes. Note that kubectl is not strictly required on the worker nodes.

# Configure the Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install version 1.14.0
yum -y install kubeadm-1.14.0 kubelet-1.14.0 kubectl-1.14.0
systemctl enable kubelet

2. Initialize the masters

A default configuration file can be generated with kubeadm config print init-defaults > kubeadm.conf. The general workflow is:

# List the required images
kubeadm config images list --config kubeadm.conf
# Pull the required images
kubeadm config images pull --config kubeadm.conf
# Initialize
kubeadm init --config kubeadm.conf

(1) master01

Configuration file kubeadm_master01.conf:

apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.11
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: "172.16.1.10:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 172.16.1.11
  - 172.16.1.12
  - 172.16.1.13
  - 172.16.1.10
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://172.16.1.11:2379"
      advertise-client-urls: "https://172.16.1.11:2379"
      listen-peer-urls: "https://172.16.1.11:2380"
      initial-advertise-peer-urls: "https://172.16.1.11:2380"
      initial-cluster: "k8s-master01=https://172.16.1.11:2380"
      initial-cluster-state: new
    serverCertSANs:
      - k8s-master01
      - 172.16.1.11
    peerCertSANs:
      - k8s-master01
      - 172.16.1.11
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

# Initialize master01
kubeadm init --config kubeadm_master01.conf
# Configure kubectl to manage the cluster
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

kubectl commands can then be executed. For example, listing the current pods shows that only the coredns pods are in the Pending state, because no network add-on has been installed yet.

[root@k8s-master01 ~]# kubectl get pods -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-96lr9               0/1     Pending   0          40m
coredns-8686dcc4fd-xk9st               0/1     Pending   0          40m
etcd-k8s-master01                      1/1     Running   0          39m
kube-apiserver-k8s-master01            1/1     Running   0          39m
kube-controller-manager-k8s-master01   1/1     Running   0          39m
kube-proxy-2cb7r                       1/1     Running   0          40m
kube-scheduler-k8s-master01            1/1     Running   0          39m
# Download the Flannel network add-on manifest
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# Bind the network interface
By default Flannel uses the host's first network interface. If a host has multiple interfaces and you need to specify one, modify the following section of kube-flannel.yml:

      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=ens192        # add this line
# Then apply the manifest (run this only on master01)
kubectl apply -f kube-flannel.yml

# Checking the pods again, the coredns pods are no longer Pending

[root@k8s-master01 ~]# kubectl get pods -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-96lr9               1/1     Running   0          54m
coredns-8686dcc4fd-xk9st               1/1     Running   0          54m
etcd-k8s-master01                      1/1     Running   0          53m
kube-apiserver-k8s-master01            1/1     Running   0          53m
kube-controller-manager-k8s-master01   1/1     Running   0          53m
kube-flannel-ds-amd64-4vg2s            1/1     Running   0          50s
kube-proxy-2cb7r                       1/1     Running   0          54m
kube-scheduler-k8s-master01            1/1     Running   0          53m

(2) Distribute certificates

Run the following script on master01 to copy the shared certificates and the admin kubeconfig to master02 and master03:

#!/bin/bash
for index in 12 13; do
  ip=172.16.1.${index}
  ssh $ip "mkdir -p /etc/kubernetes/pki/etcd; mkdir -p ~/.kube/"
  scp /etc/kubernetes/pki/ca.crt $ip:/etc/kubernetes/pki/ca.crt
  scp /etc/kubernetes/pki/ca.key $ip:/etc/kubernetes/pki/ca.key
  scp /etc/kubernetes/pki/sa.key $ip:/etc/kubernetes/pki/sa.key
  scp /etc/kubernetes/pki/sa.pub $ip:/etc/kubernetes/pki/sa.pub
  scp /etc/kubernetes/pki/front-proxy-ca.crt $ip:/etc/kubernetes/pki/front-proxy-ca.crt
  scp /etc/kubernetes/pki/front-proxy-ca.key $ip:/etc/kubernetes/pki/front-proxy-ca.key
  scp /etc/kubernetes/pki/etcd/ca.crt $ip:/etc/kubernetes/pki/etcd/ca.crt
  scp /etc/kubernetes/pki/etcd/ca.key $ip:/etc/kubernetes/pki/etcd/ca.key
  scp /etc/kubernetes/admin.conf $ip:/etc/kubernetes/admin.conf
  scp /etc/kubernetes/admin.conf $ip:~/.kube/config
done

(3) master02

Configuration file kubeadm_master02.conf:

apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.12
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: "172.16.1.10:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 172.16.1.11
  - 172.16.1.12
  - 172.16.1.13
  - 172.16.1.10
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://172.16.1.12:2379"
      advertise-client-urls: "https://172.16.1.12:2379"
      listen-peer-urls: "https://172.16.1.12:2380"
      initial-advertise-peer-urls: "https://172.16.1.12:2380"
      initial-cluster: "k8s-master01=https://172.16.1.11:2380,k8s-master02=https://172.16.1.12:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - k8s-master02
      - 172.16.1.12
    peerCertSANs:
      - k8s-master02
      - 172.16.1.12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

# Generate certificates
kubeadm init phase certs all --config kubeadm_master02.conf
# Configure local etcd
kubeadm init phase etcd local --config kubeadm_master02.conf
# Generate the kubelet kubeconfig
kubeadm init phase kubeconfig kubelet --config kubeadm_master02.conf
# Start kubelet
kubeadm init phase kubelet-start --config kubeadm_master02.conf
# Add master02's etcd as a new member of the etcd cluster
kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.16.1.11:2379 member add master2 https://172.16.1.12:2380
# Start kube-apiserver, kube-controller-manager, and kube-scheduler
kubeadm init phase kubeconfig all --config kubeadm_master02.conf
kubeadm init phase control-plane all --config kubeadm_master02.conf

# Check node status

[root@k8s-master02 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE    VERSION
k8s-master01   Ready    master   3h3m   v1.14.0
k8s-master02   Ready    <none>   14m    v1.14.0
# Mark the node as a master
kubeadm init phase mark-control-plane --config kubeadm_master02.conf

# Check again

[root@k8s-master02 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE    VERSION
k8s-master01   Ready    master   3h4m   v1.14.0
k8s-master02   Ready    master   16m    v1.14.0

(4) master03

Configuration file kubeadm_master03.conf:

apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.16.1.13
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: "172.16.1.10:8443"
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - "k8s-master01"
  - "k8s-master02"
  - "k8s-master03"
  - 172.16.1.11
  - 172.16.1.12
  - 172.16.1.13
  - 172.16.1.10
networking:
  podSubnet: "10.244.0.0/16"
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://172.16.1.13:2379"
      advertise-client-urls: "https://172.16.1.13:2379"
      listen-peer-urls: "https://172.16.1.13:2380"
      initial-advertise-peer-urls: "https://172.16.1.13:2380"
      initial-cluster: "k8s-master01=https://172.16.1.11:2380,k8s-master02=https://172.16.1.12:2380,k8s-master03=https://172.16.1.13:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - k8s-master03
      - 172.16.1.13
    peerCertSANs:
      - k8s-master03
      - 172.16.1.13
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

# Generate certificates
kubeadm init phase certs all --config kubeadm_master03.conf
# Configure local etcd
kubeadm init phase etcd local --config kubeadm_master03.conf
# Generate the kubelet kubeconfig
kubeadm init phase kubeconfig kubelet --config kubeadm_master03.conf
# Start kubelet
kubeadm init phase kubelet-start --config kubeadm_master03.conf
# Add master03's etcd as a new member of the etcd cluster
kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.16.1.11:2379 member add master3 https://172.16.1.13:2380
# Start kube-apiserver, kube-controller-manager, and kube-scheduler
kubeadm init phase kubeconfig all --config kubeadm_master03.conf
kubeadm init phase control-plane all --config kubeadm_master03.conf
# Mark the node as a master
kubeadm init phase mark-control-plane --config kubeadm_master03.conf

With the steps above, all three masters have been initialized.

3. Join worker nodes to the cluster

# Run on each worker node the join command that was printed when master01 was initialized:
kubeadm join 172.16.1.10:8443 --token 8j5lga.y2cei06i6cfxbxmo \
--discovery-token-ca-cert-hash sha256:9eff14803a65631b74e4db6dfa9e7362eb1dd62cd76d56e840d33b1f5a3aa93b
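
The command can be run as-is on k8s-node01 and k8s-node02. Bootstrap tokens expire after 24 hours by default; if the token above is no longer valid, a fresh join command can be printed on any master:

# Create a new bootstrap token and print the full kubeadm join command
kubeadm token create --print-join-command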

4. Status checks

# Check nodes

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE     VERSION
k8s-master01   Ready    master   3h29m   v1.14.0
k8s-master02   Ready    master   114m    v1.14.0
k8s-master03   Ready    master   95m     v1.14.0
k8s-node01     Ready    <none>   64m     v1.14.0
k8s-node02     Ready    <none>   50m     v1.14.0

# Check cluster info

[root@k8s-master01 ~]# kubectl cluster-info
Kubernetes master is running at https://172.16.1.10:8443
KubeDNS is running at https://172.16.1.10:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

# Check component status

[root@k8s-master01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   

# Check etcd cluster members

[root@k8s-master01 ~]# kubectl exec -n kube-system etcd-k8s-master01 -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://172.16.1.11:2379 member list 
2cd4d60db6db4371: name=k8s-master01 peerURLs=https://172.16.1.11:2380 clientURLs=https://172.16.1.11:2379 isLeader=true
707da0ac9cb69832: name=k8s-master02 peerURLs=https://172.16.1.12:2380 clientURLs=https://172.16.1.12:2379 isLeader=false
c702920d32ced638: name=k8s-master03 peerURLs=https://172.16.1.13:2380 clientURLs=https://172.16.1.13:2379 isLeader=false

# Verify that IPVS is enabled
The IPVS rules should be visible with ipvsadm:

[root@k8s-master01 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 172.16.1.11:6443             Masq    1      0          0         
  -> 172.16.1.12:6443             Masq    1      0          0         
  -> 172.16.1.13:6443             Masq    1      1          0         
TCP  10.96.0.10:53 rr
  -> 10.244.3.2:53                Masq    1      0          0         
  -> 10.244.4.2:53                Masq    1      0          0         
TCP  10.96.0.10:9153 rr
  -> 10.244.3.2:9153              Masq    1      0          0         
  -> 10.244.4.2:9153              Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 10.244.3.2:53                Masq    1      0          0         
  -> 10.244.4.2:53                Masq    1      0          0       

Viewing the kube-proxy pod log with kubectl logs --tail=10 kube-proxy-tqxlq -n kube-system should show: Using ipvs Proxier.
If ipvsadm shows no rules, and the kube-proxy log instead contains:

can't determine whether to use ipvs proxy, error: IPVS proxier will not be used because the following required kernel modules are not loaded: [ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh]
Using iptables Proxier.

then enabling IPVS has failed because the required kernel modules were not loaded.
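
A possible remediation, sketched below, is to load the missing modules on the affected node and recreate its kube-proxy pod so IPVS is re-detected on startup (this assumes the kubeadm default pod label k8s-app=kube-proxy):

# Load the kernel modules required by the IPVS proxier
for mod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do modprobe $mod; done
# Delete the kube-proxy pods; the DaemonSet recreates them and they re-check IPVS support
kubectl -n kube-system delete pod -l k8s-app=kube-proxy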
