As shown in the figure, Flannel works as follows: the overall Pod network (here 172.17.0.0/16) is stored in etcd, flanneld on each node leases a smaller per-node subnet from that range and hands it to Docker as the bridge subnet, and cross-node container traffic is encapsulated by the VXLAN backend and carried over the nodes' physical network.
[root@node01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 //install dependency packages
Loaded plugins: fastestmirror
base | 3.6 kB 00:00:00
extras | 2.9 kB 00:00:00
...
[root@node01 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo //add the Aliyun Docker CE repository
Loaded plugins: fastestmirror
adding repo from: https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@node01 ~]# yum install -y docker-ce //install Docker CE
Loaded plugins: fastestmirror
docker-ce-stable | 3.5 kB 00:00:00
(1/2): docker-ce-stable/x86_64/updateinfo | 55 B 00:00:01
(2/2): docker-ce-stable/x86_64/primary_db | 37 kB 00:00:01
Loading mirror speeds from cached hostfile
...
[root@node01 ~]# systemctl start docker.service //start the docker service
[root@node01 ~]# systemctl enable docker.service //enable docker at boot
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@node01 ~]# tee /etc/docker/daemon.json <<-'EOF' //configure a registry mirror (pull acceleration)
> {
> "registry-mirrors": ["https://**********.aliyuncs.com"]
> }
> EOF
{
"registry-mirrors": ["https://**********.aliyuncs.com"]
}
[root@node01 ~]# systemctl daemon-reload //reload systemd
[root@node01 ~]# systemctl restart docker //restart docker
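To confirm the registry mirror was picked up, an optional check (the exact wording of docker info output varies between Docker versions):
docker info | grep -A 1 "Registry Mirrors"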
[root@node01 ~]# vim /etc/sysctl.conf //enable IP route forwarding
...
# For more information, see sysctl.conf(5) and sysctl.d(5).
net.ipv4.ip_forward=1
:wq
[root@node01 ~]# sysctl -p //reload kernel parameters
net.ipv4.ip_forward = 1
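As an extra sanity check, the kernel flag can be read back directly and should print 1:
cat /proc/sys/net/ipv4/ip_forward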
[root@node01 ~]# service network restart //restart the network
Restarting network (via systemctl): [ OK ]
[root@node01 ~]# systemctl restart docker //restart the docker service
[root@node01 ~]# docker version //check the Docker version
Client: Docker Engine - Community
Version: 19.03.5
API version: 1.40
Go version: go1.12.12
... //Docker deployment on this node is complete
[root@master01 etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}' //write the allocated Pod network segment into etcd for flannel to use
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}
[root@master01 etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379" get /coreos.com/network/config //verify the value was written
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}
[root@master01 etcd-cert]# cd .. //return to the k8s directory
[root@master01 k8s]# ls //check that the flannel package is present
cfssl.sh etcd-v3.3.10-linux-amd64 kubernetes-server-linux-amd64.tar.gz
etcd-cert etcd-v3.3.10-linux-amd64.tar.gz
etcd.sh flannel-v0.10.0-linux-amd64.tar.gz
[root@master01 k8s]# scp flannel-v0.10.0-linux-amd64.tar.gz flannel.sh root@192.168.80.13:/root //copy the package to node01
root@192.168.80.13's password:
flannel-v0.10.0-linux-amd64.tar.gz 100% 9479KB 61.1MB/s 00:00
flannel.sh: No such file or directory
[root@master01 k8s]# scp flannel-v0.10.0-linux-amd64.tar.gz flannel.sh root@192.168.80.14:/root //copy the package to node02
root@192.168.80.14's password:
flannel-v0.10.0-linux-amd64.tar.gz 100% 9479KB 119.3MB/s 00:00
flannel.sh: No such file or directory
Perform the same operations on node01 and node02. (The "flannel.sh: No such file or directory" messages from scp above can be ignored; the script is written by hand on each node in the next step.)
[root@node01 ~]# ls //verify the package was copied over
anaconda-ks.cfg flannel-v0.10.0-linux-amd64.tar.gz
[root@node01 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz //extract the package
flanneld
mk-docker-opts.sh
README.md
[root@node01 ~]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p //recursively create the k8s working directories
[root@node01 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/ //move the binary and helper script into the bin directory
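Optionally confirm that both files landed in the working directory and are executable (they normally already are after extraction):
ls -l /opt/kubernetes/bin/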
[root@node01 ~]# vim flannel.sh //write the flannel setup script, which generates the config file and systemd unit and then starts flanneld
#!/bin/bash
ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}
cat <<EOF >/opt/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF
cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
:wq
[root@node01 ~]# bash flannel.sh https://192.168.80.12:2379,https://192.168.80.13:2379,https://192.168.80.14:2379 //run the flannel script to bring up the flannel network
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
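Before editing the Docker unit file, it is worth checking that flanneld is running and has created the VXLAN interface. A minimal check, assuming the service and interface names used by this script:
systemctl is-active flanneld
ip -d link show flannel.1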
[root@node01 ~]# vim /usr/lib/systemd/system/docker.service //configure the docker unit file to use the flannel network
...
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/run/flannel/subnet.env //add this line to load the flannel subnet environment file
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock //add the $DOCKER_NETWORK_OPTIONS variable
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
...
:wq
[root@node01 ~]# cat /run/flannel/subnet.env //inspect the flannel subnet file that docker reads at startup
DOCKER_OPT_BIP="--bip=172.17.49.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=172.17.49.1/24 --ip-masq=false --mtu=1450" //--bip sets the Docker bridge subnet at startup. Note: the subnets assigned to node01 and node02 both fall inside the 172.17.0.0/16 network written to etcd
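The MTU of 1450 is expected: the VXLAN header adds roughly 50 bytes of overhead on top of the physical interface's 1500-byte MTU, so flannel lowers the container-side MTU accordingly. It can be cross-checked against the flannel.1 interface:
ip link show flannel.1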
Check the network:
[root@node01 ~]# systemctl daemon-reload //reload systemd
[root@node01 ~]# systemctl restart docker //restart docker
[root@node01 ~]# ifconfig //check the network interfaces
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.17.49.1 netmask 255.255.255.0 broadcast 172.17.49.255 //docker0 bridge IP address
...
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.80.13 netmask 255.255.255.0 broadcast 192.168.80.255
...
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450
inet 172.17.49.0 netmask 255.255.255.255 broadcast 0.0.0.0 //flannel.1 interface address
...
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
...
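With the VXLAN backend, flanneld also installs a route to the other node's subnet through flannel.1. A quick way to see it (the exact output format depends on the flannel and iproute versions):
ip route show | grep flannel.1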
On node02:
[root@node02 ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.17.63.1 netmask 255.255.255.0 broadcast 172.17.63.255 //docker0 bridge IP address
...
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.80.14 netmask 255.255.255.0 broadcast 192.168.80.255
...
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450
inet 172.17.63.0 netmask 255.255.255.255 broadcast 0.0.0.0 //flannel.1 interface address
...
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
...
[root@node02 ~]# ping 172.17.49.1 //ping node01's docker0 address to test cross-node connectivity
PING 172.17.49.1 (172.17.49.1) 56(84) bytes of data.
64 bytes from 172.17.49.1: icmp_seq=1 ttl=64 time=0.344 ms
64 bytes from 172.17.49.1: icmp_seq=2 ttl=64 time=0.333 ms
64 bytes from 172.17.49.1: icmp_seq=3 ttl=64 time=0.346 ms
^C
--- 172.17.49.1 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.333/0.341/0.346/0.005 ms
[root@node01 ~]# docker run -it centos:7 /bin/bash //run a centos:7 container (do the same on node02)
Unable to find image 'centos:7' locally
7: Pulling from library/centos
ab5ef0e58194: Pull complete
Digest: sha256:4a701376d03f6b39b8c2a8f4a8e499441b0d567f9ab9d58e4991de4472fb813c
Status: Downloaded newer image for centos:7
[root@e8ee45a4fd28 /]# yum install net-tools -y //install net-tools inside the container (for ifconfig)
Loaded plugins: fastestmirror, ovl
Determining fastest mirrors
* base: mirrors.163.com
* extras: mirrors.163.com
...
On node01 (inside the container):
[root@e8ee45a4fd28 /]# ifconfig //check the container's interfaces
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450
inet 172.17.49.2 netmask 255.255.255.0 broadcast 172.17.49.255
...
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
...
On node02 (inside the container):
[root@47aa8b55a61a /]# ifconfig //check the container's interfaces
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450
inet 172.17.63.2 netmask 255.255.255.0 broadcast 172.17.63.255
...
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
...
[root@47aa8b55a61a /]# ping 172.17.49.2 //from the container on node02, ping the container on node01 to test cross-node container communication
PING 172.17.49.2 (172.17.49.2) 56(84) bytes of data.
64 bytes from 172.17.49.2: icmp_seq=1 ttl=62 time=0.406 ms
64 bytes from 172.17.49.2: icmp_seq=2 ttl=62 time=0.377 ms
64 bytes from 172.17.49.2: icmp_seq=3 ttl=62 time=0.389 ms
64 bytes from 172.17.49.2: icmp_seq=4 ttl=62 time=0.356 ms
^C
--- 172.17.49.2 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3001ms
rtt min/avg/max/mdev = 0.356/0.382/0.406/0.018 ms //the containers communicate successfully
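To see that this traffic really is VXLAN-encapsulated, capture on the node's physical interface while the ping runs; flannel's VXLAN backend listens on UDP port 8472 by default (tcpdump may need to be installed first, and the port is assumed from the default configuration):
tcpdump -nn -i ens33 udp port 8472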