I. Docker overlay network deployment
1.1. Overlay network overview
A Docker overlay network needs a key-value store to hold network state, including Network, Endpoint and IP information. Consul, Etcd and ZooKeeper are all key-value stores supported by Docker.
Consul is a key-value database that can store system state information. Nothing has to be coded here: once Consul is running, Docker stores its network state in it automatically. The simplest way to install Consul is to run it as a Docker container.
1.2. Test environment
192.168.2.120 Consul
192.168.2.121 linux dm1
192.168.2.122 linux dm2
1.2.1. Deploy Consul
Search for an available Consul image:
# docker search consul
Run Consul:
# docker run -d -p 8500:8500 -h consul --name "consul" progrium/consul -server -bootstrap
355234f445f2082d999acc393b52ea050c2b8d4efe7139a73b0c7f31bb8c7eaf
Check the web page:
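A quick optional check (not part of the original steps) that the Consul agent is reachable is to query its HTTP API:
# curl http://192.168.2.120:8500/v1/status/leader    # returns the address of the current raft leader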
1.2.2. Point the cluster store of dm1 and dm2 at 192.168.2.120
# vim /etc/systemd/system/docker.service.d/10-machine.conf
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --storage-driver overlay2 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=generic --cluster-store=consul://192.168.2.120:8500 --cluster-advertise=ens32:2376
Environment=
#--cluster-store=consul://192.168.2.120:8500
#--cluster-advertise=ens32:2376
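After editing the drop-in, reload systemd and restart Docker on both hosts so the new flags take effect (standard systemd steps, not shown in the original transcript):
# systemctl daemon-reload
# systemctl restart docker.service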
1.3. Both hosts register themselves in Consul automatically
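The registration can be verified through Consul's KV HTTP API; in my experience libnetwork keeps its host entries under the docker/nodes prefix, but treat the exact key layout as an assumption:
# curl 'http://192.168.2.120:8500/v1/kv/docker/nodes?keys'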
1.4. Create an overlay network on dm1
1.4.1. Create the network
# docker network create -d overlay ckl_ov
e67ad2a1128c9f5fe19949167a413474cadd519ef2753d428308183050b04495
# docker network ls
NETWORK ID NAME DRIVER SCOPE
18e725b2d314 bridge bridge local
e67ad2a1128c ckl_ov overlay global
32392bea0cf5 host host local
c245d2d564f7 none null local
1.4.2. Inspect the overlay network:
# docker network inspect e67ad2a1128c
[
{
"Name": "ckl_ov",
"Id": "e67ad2a1128c9f5fe19949167a413474cadd519ef2753d428308183050b04495",
"Created": "2018-12-19T22:34:35.820299362-05:00",
"Scope": "global",
"Driver": "overlay",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "10.0.0.0/24", # subnet allocated to the overlay network
"Gateway": "10.0.0.1" # overlay gateway
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]
1.4.3. Check the network on dm2
# docker network ls
NETWORK ID NAME DRIVER SCOPE
245f119f53d2 bridge bridge local
e67ad2a1128c ckl_ov overlay global
fe7e0a7e103c host host local
0fc752ba16e0 none null local # the overlay network created on dm1 is visible here too, because it has already been registered in Consul
ID of the created network:
1.4.5. Run a container on dm1 attached to the new network
# docker run -itd --network ckl_ov --name "ckl1" centos /bin/bash
305765f57e343bb82d8ee6acbc6c2b9582b4777ff759309a456370f981906eab
Check the container's network:
# docker exec -it 305765f57e34 /bin/bash
[root@305765f57e34 /]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
23: eth0@if24: mtu 1450 qdisc noqueue state UP group default
link/ether 02:42:0a:00:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.0.0.2/24 brd 10.0.0.255 scope global eth0 # overlay network
valid_lft forever preferred_lft forever
25: eth1@if26: mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 1
inet 172.18.0.2/16 brd 172.18.255.255 scope global eth1
valid_lft forever preferred_lft forever
[root@305765f57e34 /]# ping www.123.com
PING www.123.com (61.132.13.130) 56(84) bytes of data.
64 bytes from 61.132.13.130 (61.132.13.130): icmp_seq=1 ttl=115 time=11.5 ms
1.4.6. Check the networks
# docker network ls
NETWORK ID NAME DRIVER SCOPE
18e725b2d314 bridge bridge local
e67ad2a1128c ckl_ov overlay global
fad7ad56bf24 docker_gwbridge bridge local # overlay also creates docker_gwbridge, which gives the containers outbound access to external networks
32392bea0cf5 host host local
c245d2d564f7 none null local
1.5. Create a container on dm2
1.5.1. Create a container attached to the network
# docker run -itd --network=ckl_ov --name "ckl2" centos /bin/bash
1.5.2. Enter the container and test connectivity
# docker exec -it 432aa8189729 /bin/bash
[root@432aa8189729 /]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
7: eth0@if8: mtu 1450 qdisc noqueue state UP group default
link/ether 02:42:0a:00:00:03 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.0.0.3/24 brd 10.0.0.255 scope global eth0 # address allocated from the overlay network
valid_lft forever preferred_lft forever
10: eth1@if11: mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 1
inet 172.18.0.2/16 brd 172.18.255.255 scope global eth1
valid_lft forever preferred_lft forever
# ping ckl1
PING ckl1 (10.0.0.2) 56(84) bytes of data.
64 bytes from ckl1.ckl_ov (10.0.0.2): icmp_seq=1 ttl=64 time=1.39 ms
64 bytes from ckl1.ckl_ov (10.0.0.2): icmp_seq=2 ttl=64 time=0.645 ms
1.6. Create a second overlay network on dm1
1.6.1. Create the network
[root@dm1 ~]# docker network create -d overlay ckl_ov2
3523f9e585c6da956e378f91f749cdf3a84fa22dc4d19edec3220ac3d5a91a74
[root@dm1 ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
18e725b2d314 bridge bridge local
e67ad2a1128c ckl_ov overlay global
3523f9e585c6 ckl_ov2 overlay global
fad7ad56bf24 docker_gwbridge bridge local
32392bea0cf5 host host local
c245d2d564f7 none null local
[root@dm1 ~]# docker network inspect 3523f9e585c6
[
{
"Name": "ckl_ov2",
"Id": "3523f9e585c6da956e378f91f749cdf3a84fa22dc4d19edec3220ac3d5a91a74",
"Created": "2018-12-20T00:31:33.14717792-05:00",
"Scope": "global",
"Driver": "overlay",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "10.0.1.0/24",
"Gateway": "10.0.1.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]
1.6.2. Create a new container on dm1 attached to the new overlay network
[root@dm1 ~]# docker run -itd --network ckl_ov --name "ckl3" centos /bin/bash
51b2a7ea50e848cfd7061501f1893689798e6ee9b0f701fe2bb6c7d288d13d1f
1.6.3. Test connectivity
# ping ckl1
PING ckl1 (10.0.0.2) 56(84) bytes of data.
64 bytes from ckl1.ckl_ov (10.0.0.2): icmp_seq=1 ttl=64 time=2.12 ms
64 bytes from ckl1.ckl_ov (10.0.0.2): icmp_seq=2 ttl=64 time=0.191 ms
64 bytes from ckl1.ckl_ov (10.0.0.2): icmp_seq=3 ttl=64 time=0.170 ms # note that ckl3 was actually attached to ckl_ov above, which is why it reaches ckl1; by default, separate overlay networks are isolated from each other
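If a container genuinely needs to reach both overlay networks, the usual approach (a standard docker CLI command, not part of the original run) is to attach it to the second network as well:
# docker network connect ckl_ov2 ckl3
# docker exec -it ckl3 ip a    # the container should now show an extra interface with an address from 10.0.1.0/24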
II. Macvlan networks
2.1. Enable promiscuous mode on dm1 and dm2
2.1.1. dm1
[root@dm1 ~]# ip link set ens32 promisc on
[root@dm1 ~]# ip link show ens32
2: ens32: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 00:0c:29:21:8a:4f brd ff:ff:ff:ff:ff:ff
2.1.2. dm2
[root@dm2 ~]# ip link set ens32 promisc on
[root@dm2 ~]# ip link show ens32
2: ens32: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 00:0c:29:70:8b:c9 brd ff:ff:ff:ff:ff:ff
2.2. Create a macvlan network
2.2.1. Create the network on dm1:
Bridge mode:
[root@dm1 ~]# docker network create -d macvlan \
> --subnet=172.16.86.0/24 \
> --gateway=172.16.86.1 \
> -o parent=ens32 mac_net
c6ebc70a4529fc1cf0491e4218eca32b078c92a28f352288bc162aa30f5a5ceb
[root@dm1 ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
18e725b2d314 bridge bridge local
e67ad2a1128c ckl_ov overlay global
3523f9e585c6 ckl_ov2 overlay global
fad7ad56bf24 docker_gwbridge bridge local
32392bea0cf5 host host local
c6ebc70a4529 mac_net macvlan local # the macvlan network
c245d2d564f7 none null local
[root@dm1 ~]# docker network inspect mac_net
[
{
"Name": "mac_net",
"Id": "c6ebc70a4529fc1cf0491e4218eca32b078c92a28f352288bc162aa30f5a5ceb",
"Created": "2018-12-20T01:44:41.349015886-05:00",
"Scope": "local",
"Driver": "macvlan",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "172.16.86.0/24", # macvlan subnet
"Gateway": "172.16.86.1" # macvlan gateway
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {
"parent": "ens32"
},
"Labels": {}
}
]
2.2.2. Create the network on dm2
[root@dm2 ~]# docker network create -d macvlan --subnet=172.16.86.0/24 --gateway=172.16.86.1 -o parent=ens32 mac_net
bf8aaf2c64fa14d4ac1cc25dc9ef4b308524608b6a6d9f136bfce48d3819ba9b
Always pass an explicit IP when running containers on this network: the two hosts allocate addresses from the same subnet independently, so automatic allocation will eventually collide (see the sketch below for an alternative).
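An alternative I would suggest (using the standard --ip-range option of docker network create) is to give each host a disjoint allocation range inside the shared subnet, so automatic IPAM cannot collide:
# on dm1
# docker network create -d macvlan --subnet=172.16.86.0/24 --ip-range=172.16.86.64/27 --gateway=172.16.86.1 -o parent=ens32 mac_net
# on dm2
# docker network create -d macvlan --subnet=172.16.86.0/24 --ip-range=172.16.86.96/27 --gateway=172.16.86.1 -o parent=ens32 mac_net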
2.2.3. Run a container on dm1:
[root@dm1 ~]# docker run -itd --network=mac_net --ip=172.16.86.10 --name "cklm1" centos /bin/bash
4497b2e3144ac26eec5e268538e4045cf5ff88633082d0667262aa6a349de951
2.2.4. Run a container on dm2:
[root@dm2 ~]# docker run -itd --network=mac_net --ip=172.16.86.11 --name "cklm2" centos /bin/bash
82bc31136367ce9bd9bb5bc590bfd781b6e7a0830dd9323f09b457833eb7aa7b
2.2.5. Access the container on dm2 from the container on dm1
[root@dm1 ~]# docker exec -it 4497b2e3144a /bin/bash
[root@4497b2e3144a /]#
[root@4497b2e3144a /]#
[root@4497b2e3144a /]# ping 172.16.86.10
PING 172.16.86.10 (172.16.86.10) 56(84) bytes of data.
64 bytes from 172.16.86.10: icmp_seq=1 ttl=64 time=0.107 ms
64 bytes from 172.16.86.10: icmp_seq=2 ttl=64 time=0.103 ms
[root@4497b2e3144a /]# ping www.163.com
ping: www.163.com: Name or service not known # addresses on the macvlan network are mutually reachable (note that the ping above targets this container's own IP, 172.16.86.10; ping 172.16.86.11 to actually exercise the cross-host path), but the containers cannot reach the Internet
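Macvlan containers bypass Docker's NAT entirely, so Internet access only works if 172.16.86.1 really exists on the LAN and routes (or NATs) that subnet. A related quirk is that the parent interface cannot talk to its own macvlan children; a common workaround, sketched here with generic iproute2 commands that are not part of the original setup, is to give the host a macvlan interface of its own:
# ip link add mac0 link ens32 type macvlan mode bridge
# ip addr add 172.16.86.100/24 dev mac0    # 172.16.86.100 is an arbitrary unused address in the subnet
# ip link set mac0 up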
3. Configure multiple macvlan networks
3.1. Add sub-interfaces
Add VLAN sub-interfaces on dm1 and dm2.
3.1.1. Configure the sub-interfaces on dm1:
[root@dm1 /]# cat /etc/sysconfig/network-scripts/ifcfg-ens32.10
TYPE=Ethernet
BOOTPROTO=static
NAME=ens32.10
DEVICE=ens32.10
ONBOOT=yes
[root@dm1 /]# cat /etc/sysconfig/network-scripts/ifcfg-ens32.20
TYPE=Ethernet
BOOTPROTO=static
NAME=ens32.20
DEVICE=ens32.20
ONBOOT=yes
Restart the network:
[root@dm1 network-scripts]# systemctl restart network.service
3.1.2. Configure the sub-interfaces on dm2
[root@dm2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens32.10
TYPE=Ethernet
BOOTPROTO=static
NAME=ens32.10
DEVICE=ens32.10
ONBOOT=yes
[root@dm2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens32.20
TYPE=Ethernet
BOOTPROTO=static
NAME=ens32.20
DEVICE=ens32.20
ONBOOT=yes
Restart the network:
[root@dm2 ~]# systemctl restart network.service
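On a stock CentOS 7 initscripts setup the ifcfg files above normally also need the 802.1Q flag before ens32.10/ens32.20 come up as VLAN devices. If the sub-interfaces do not appear after the restart, the usual fix (an assumption about this environment, not something shown in the original) is to add the following line to each ifcfg-ens32.10/ifcfg-ens32.20 file and restart the network again:
VLAN=yes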
3.2. Add macvlan networks
3.2.1. Add macvlan networks on dm1
[root@dm1 ~]# docker network create -d macvlan --subnet=172.16.88.0/24 --gateway=172.16.88.1 -o parent=ens32.10 mac_net1
cae5b58f2011a8847ea9e8ae733196fe14a172a1b6207aa238b7a3ccc71a98ad
[root@dm1 ~]# docker network create -d macvlan --subnet=172.16.99.0/24 --gateway=172.16.99.1 -o parent=ens32.20 mac_net2
08d78132536ed836e6a774b9ebd4e4197f7505f8b49b9fab86933d53150a1bd2
3.2.2. Add macvlan networks on dm2
[root@dm2 ~]# docker network create -d macvlan --subnet=172.16.88.0/24 --gateway=172.16.88.1 -o parent=ens32.10 mac_net1
86dd0b17055628ea05764b01b9be6d93e16945d0224073e951eb6010daff533f
[root@dm2 ~]# docker network create -d macvlan --subnet=172.16.99.0/24 --gateway=172.16.99.1 -o parent=ens32.20 mac_net2
361e981d778d83b52933c784adf6d8e27d1251516cbacfb19f0d073ede86affd
3.3. Run containers
3.3.1. Run containers on dm1
[root@dm1 /]# docker run -itd --network=mac_net1 --ip=172.16.88.10 --name "cklm1" centos /bin/bash
750ff692b753fc427a7f8427b2ecba61d512901161bdd7b730de51c89997ca05
[root@dm1 /]# docker run -itd --network=mac_net2 --ip=172.16.99.10 --name "cklm2" centos /bin/bash
506307d9eb704843fefcdf57c34c98a9ac7ae29c584385d275822dd4a0f61601
3.3.2. Run containers on dm2
[root@dm2 ~]# docker run -itd --network=mac_net1 --ip=172.16.88.11 --name "cklm3" centos /bin/bash
d08d6155ddb1c8c4dfa97eed061372dac6a097cf97a5cd8d2732587182acdd4e
[root@dm2 ~]# docker run -itd --network=mac_net2 --ip=172.16.99.11 --name "cklm4" centos /bin/bash
5aafd07ba3de969442dc0fa779420e9ff35bc091b4323e9b75935f4ffd4081b1
3.4. Test connectivity
3.4.1. On dm1:
[root@dm1 /]# docker exec -it cklm1 /bin/bash
[root@750ff692b753 /]#
[root@750ff692b753 /]# ping -c 3 172.16.88.11
PING 172.16.88.11 (172.16.88.11) 56(84) bytes of data.
64 bytes from 172.16.88.11: icmp_seq=1 ttl=64 time=0.362 ms
64 bytes from 172.16.88.11: icmp_seq=2 ttl=64 time=0.396 ms
64 bytes from 172.16.88.11: icmp_seq=3 ttl=64 time=0.289 ms
--- 172.16.88.11 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.289/0.349/0.396/0.044 ms
[root@750ff692b753 /]# ping -c 3 172.16.99.11
PING 172.16.99.11 (172.16.99.11) 56(84) bytes of data.
From 172.16.88.10 icmp_seq=1 Destination Host Unreachable
From 172.16.88.10 icmp_seq=2 Destination Host Unreachable
From 172.16.88.10 icmp_seq=3 Destination Host Unreachable
3.4.2. On dm2:
[root@dm2 ~]# docker exec -it cklm4 /bin/bash
[root@5aafd07ba3de /]# ping -c3 172.16.99.10
PING 172.16.99.10 (172.16.99.10) 56(84) bytes of data.
64 bytes from 172.16.99.10: icmp_seq=1 ttl=64 time=0.298 ms
64 bytes from 172.16.99.10: icmp_seq=2 ttl=64 time=0.290 ms
64 bytes from 172.16.99.10: icmp_seq=3 ttl=64 time=0.236 ms
--- 172.16.99.10 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.236/0.274/0.298/0.033 ms
[root@5aafd07ba3de /]#
[root@5aafd07ba3de /]# ping -c3 172.16.88.10
PING 172.16.88.10 (172.16.88.10) 56(84) bytes of data.
From 172.16.99.11 icmp_seq=1 Destination Host Unreachable
From 172.16.99.11 icmp_seq=2 Destination Host Unreachable
From 172.16.99.11 icmp_seq=3 Destination Host Unreachable
--- 172.16.88.10 ping statistics ---
3 packets transmitted, 0 received, +3 errors, 100% packet loss, time 1999ms
pipe 3 # routing between the macvlan networks on different VLANs is not solved here
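Traffic between mac_net1 (VLAN 10, 172.16.88.0/24) and mac_net2 (VLAN 20, 172.16.99.0/24) has to be routed by something that sits on both VLANs and owns the gateway addresses 172.16.88.1 and 172.16.99.1, typically a layer-3 switch or router. A minimal sketch of doing it on a separate Linux box trunked to both VLANs (the interface name and the assumption that the gateway IPs are otherwise unused are mine):
# ip link add link eth0 name eth0.10 type vlan id 10
# ip link add link eth0 name eth0.20 type vlan id 20
# ip addr add 172.16.88.1/24 dev eth0.10
# ip addr add 172.16.99.1/24 dev eth0.20
# ip link set eth0.10 up && ip link set eth0.20 up
# sysctl -w net.ipv4.ip_forward=1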
III. Flannel networks
Flannel provides networking for containers. In its model all containers share one large network; on every host flannel carves a subnet out of that network, and each container created on the host gets an IP from that host's subnet.
etcd maintains the shared data and centrally manages each node's network identity and subnet assignment.
Deployment environment:
192.168.2.120 etcd
192.168.2.121 dm1
192.168.2.122 dm2
3.1. Deploy etcd on 192.168.2.120
3.1.1. Install etcd
[root@docker-2-120 ~]# yum install etcd
3.1.2. Start etcd
[root@docker-2-120 ~]# nohup etcd --listen-client-urls 'http://192.168.2.120:2379' --advertise-client-urls 'http://192.168.2.120:2379' &
[1] 16496
3.1.3. Test etcd
[root@docker-2-120 ~]# etcdctl --endpoints 'http://192.168.2.120:2379' set name "ckl"
ckl
[root@docker-2-120 ~]#
[root@docker-2-120 ~]# etcdctl --endpoints 'http://192.168.2.120:2379' get name
ckl
3.1.4. Add the network configuration
[root@docker-2-120 ~]# etcdctl --endpoints 'http://192.168.2.120:2379' mk /atomic.io/network/config '{ "Network": "10.8.0.0/16" }' # the range can be customized; container IPs are allocated from it
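The same etcd v2 commands used above can read the key back, and once flanneld is running on the hosts it records each host's subnet lease under a subnets key beneath the prefix (the exact path is flannel's convention, stated here as an assumption):
# etcdctl --endpoints 'http://192.168.2.120:2379' get /atomic.io/network/config
# etcdctl --endpoints 'http://192.168.2.120:2379' ls /atomic.io/network/subnets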
3.2. Install flannel
3.2.1. Install flannel on dm1
[root@dm1 network-scripts]# yum install flannel
3.2.2. Edit the configuration on dm1
[root@dm1 network-scripts]# vim /etc/sysconfig/flanneld
# Flanneld configuration options
# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.2.120:2379"
# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"
# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
3.2.3. Install flannel on dm2
[root@dm2 ~]# yum install flannel
3.2.4. Edit the configuration on dm2
[root@dm2 ~]# vim /etc/sysconfig/flanneld
# Flanneld configuration options
# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.2.120:2379"
# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"
# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
3.2.5. Start flanneld on dm1
[root@dm1 network-scripts]# systemctl start flanneld.service
[root@dm1 /]# ps -ef | grep flannel
root 17030 1 0 02:27 ? 00:00:00 /usr/bin/flanneld -etcd-endpoints=http://192.168.2.120:2379 -etcd-prefix=/atomic.io/network
root 17495 8904 0 02:53 pts/0 00:00:00 grep --color=auto flannel
Check the network addresses:
[root@dm1 network-scripts]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens32: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:21:8a:4f brd ff:ff:ff:ff:ff:ff
inet 192.168.2.121/24 brd 192.168.2.255 scope global noprefixroute ens32
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe21:8a4f/64 scope link
valid_lft forever preferred_lft forever
3: docker0: mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:a9:aa:f0:d0 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:a9ff:feaa:f0d0/64 scope link
valid_lft forever preferred_lft forever
4: docker_gwbridge: mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:49:38:91:d5 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.1/16 brd 172.18.255.255 scope global docker_gwbridge
valid_lft forever preferred_lft forever
5: ens32.20@ens32: mtu 1500 qdisc noqueue state UP group default
link/ether 00:0c:29:21:8a:4f brd ff:ff:ff:ff:ff:ff
inet6 fe80::20c:29ff:fe21:8a4f/64 scope link
valid_lft forever preferred_lft forever
6: ens32.10@ens32: mtu 1500 qdisc noqueue state UP group default
link/ether 00:0c:29:21:8a:4f brd ff:ff:ff:ff:ff:ff
inet6 fe80::20c:29ff:fe21:8a4f/64 scope link
valid_lft forever preferred_lft forever
12: flannel0: mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
link/none
inet 10.8.95.0/16 scope global flannel0 # the subnet assigned to this host
valid_lft forever preferred_lft forever
inet6 fe80::4e1c:dc00:f84:7394/64 scope link flags 800
valid_lft forever preferred_lft forever
14: veth6296bc2@if13: mtu 1500 qdisc noqueue master docker0 state UP group default
link/ether ee:1f:96:08:9d:58 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet6 fe80::ec1f:96ff:fe08:9d58/64 scope link
valid_lft forever preferred_lft forever
3.2.6. Start flanneld on dm2
[root@dm2 ~]# systemctl start flanneld.service
[root@dm2 ~]# ps -ef | grep flannel
root 16150 1 0 02:25 ? 00:00:00 /usr/bin/flanneld -etcd-endpoints=http://192.168.2.120:2379 -etcd-prefix=/atomic.io/network
root 16345 12334 0 02:41 pts/0 00:00:00 grep --color=auto flannel
Check the network addresses:
[root@dm2 ~]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens32: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:70:8b:c9 brd ff:ff:ff:ff:ff:ff
inet 192.168.2.122/24 brd 192.168.2.255 scope global noprefixroute ens32
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe70:8bc9/64 scope link
valid_lft forever preferred_lft forever
3: docker_gwbridge: mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:57:6b:37:bc brd ff:ff:ff:ff:ff:ff
inet 172.18.0.1/16 brd 172.18.255.255 scope global docker_gwbridge
valid_lft forever preferred_lft forever
4: docker0: mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:53:fc:a9:5a brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
5: ens32.20@ens32: mtu 1500 qdisc noqueue state UP group default
link/ether 00:0c:29:70:8b:c9 brd ff:ff:ff:ff:ff:ff
inet6 fe80::20c:29ff:fe70:8bc9/64 scope link
valid_lft forever preferred_lft forever
6: ens32.10@ens32: mtu 1500 qdisc noqueue state UP group default
link/ether 00:0c:29:70:8b:c9 brd ff:ff:ff:ff:ff:ff
inet6 fe80::20c:29ff:fe70:8bc9/64 scope link
valid_lft forever preferred_lft forever
8: flannel0: mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
link/none
inet 10.8.12.0/16 scope global flannel0 # the subnet assigned to this host
valid_lft forever preferred_lft forever
inet6 fe80::ff5:166d:a8c2:11c8/64 scope link flags 800
valid_lft forever preferred_lft forever
3.3. Connect Docker to flannel
3.3.1. Check the flannel info on dm1
[root@dm1 /]# cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.8.0.0/16
FLANNEL_SUBNET=10.8.95.1/24
FLANNEL_MTU=1472
FLANNEL_IPMASQ=false
3.3.2. Configure the Docker startup options on dm1
[root@dm1 /]# cat /etc/systemd/system/docker.service.d/10-machine.conf
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --storage-driver overlay2 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=generic --bip=10.8.95.1/24 --mtu=1472
Environment=
Restart Docker:
[root@dm1 /]# systemctl daemon-reload
[root@dm1 /]# systemctl restart docker.service
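The --bip/--mtu values above are copied by hand from /run/flannel/subnet.env. A way to avoid hard-coding them, sketched here as a systemd drop-in (the variable names come straight from subnet.env; adapt the remaining dockerd flags to your own setup), is to let systemd read that file:
[Service]
EnvironmentFile=/run/flannel/subnet.env
ExecStart=
ExecStart=/usr/bin/dockerd -H unix:///var/run/docker.sock --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}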
3.3.3. Check the flannel info on dm2
[root@dm2 ~]# cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.8.0.0/16
FLANNEL_SUBNET=10.8.12.1/24
FLANNEL_MTU=1472
FLANNEL_IPMASQ=false
3.3.4. Configure the Docker startup options on dm2
[root@dm2 ~]# vim /etc/systemd/system/docker.service.d/10-machine.conf
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --storage-driver overlay2 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=generic --bip=10.8.12.1/24 --mtu=1472
Environment=
Restart Docker:
[root@dm2 ~]# systemctl daemon-reload
[root@dm2 ~]# systemctl restart docker.service
3.4. Run containers
3.4.1. Run a container on dm1
[root@dm1 /]# docker run -itd --name "cklf1" centos /bin/bash
0a2a6a84b3f6bb37d5336b986999dab900813a92c1ed74218b5ef98988193d51
3.4.2. Run a container on dm2
[root@dm2 ~]# docker run -itd --name "cklf2" centos /bin/bash
f78f96184010beeaefa807216304de4a4ed3d8ed60db692e75564e677aa266c0
3.4.3. Test from the container on dm1
[root@dm1 /]# docker exec -it 0a2a6a84b3f6 /bin/bash
[root@0a2a6a84b3f6 /]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
17: eth0@if18: mtu 1472 qdisc noqueue state UP group default
link/ether 02:42:0a:08:5f:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.8.95.2/24 brd 10.8.95.255 scope global eth0
valid_lft forever preferred_lft forever
[root@0a2a6a84b3f6 /]#
[root@0a2a6a84b3f6 /]#
[root@0a2a6a84b3f6 /]# ping -c3 10.8.12.2 # ping container cklf2
PING 10.8.12.2 (10.8.12.2) 56(84) bytes of data.
64 bytes from 10.8.12.2: icmp_seq=1 ttl=60 time=1.36 ms
64 bytes from 10.8.12.2: icmp_seq=2 ttl=60 time=0.855 ms
64 bytes from 10.8.12.2: icmp_seq=3 ttl=60 time=0.822 ms
--- 10.8.12.2 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2002ms
rtt min/avg/max/mdev = 0.822/1.015/1.368/0.249 ms
3.4.4. Test from the container on dm2
[root@dm2 ~]# docker exec -it f78f96184010 /bin/bash
[root@f78f96184010 /]#
[root@f78f96184010 /]#
[root@f78f96184010 /]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
9: eth0@if10: mtu 1472 qdisc noqueue state UP group default
link/ether 02:42:0a:08:0c:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.8.12.2/24 brd 10.8.12.255 scope global eth0
valid_lft forever preferred_lft forever
[root@f78f96184010 /]# ping -c3 10.8.95.2 # ping container cklf1
PING 10.8.95.2 (10.8.95.2) 56(84) bytes of data.
64 bytes from 10.8.95.2: icmp_seq=1 ttl=60 time=1.18 ms
64 bytes from 10.8.95.2: icmp_seq=2 ttl=60 time=1.01 ms
64 bytes from 10.8.95.2: icmp_seq=3 ttl=60 time=0.886 ms
--- 10.8.95.2 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2002ms
rtt min/avg/max/mdev = 0.886/1.028/1.185/0.125 ms
IV. Weave networks
4.1. Weave overview
Weave does not need a centralized key-value store, so it is simple to install and run: just download the weave binary onto the hosts.
4.2. Install weave
4.2.1. Install weave on dm1
[root@dm1 /]# curl -L git.io/weave -o /usr/local/bin/weave
[root@dm1 /]# chmod a+x /usr/local/bin/weave
[root@dm1 /]# weave version
weave script 2.5.0
weave 2.5.0
4.2.2. Install weave on dm2
[root@dm2 ~]# curl -L git.io/weave -o /usr/local/bin/weave
[root@dm2 ~]# chmod a+x /usr/local/bin/weave
[root@dm2 ~]# weave version
weave script 2.5.0
weave 2.5.0
4.3. Start weave
4.3.1. Start weave on dm1; the weave components run as containers
[root@dm1 /]# weave launch
[root@dm1 /]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
57b5b44f6216 weaveworks/weave:2.5.0 "/home/weave/weaver …" 54 seconds ago Up 54 seconds weave
4.3.2. Check the weave network
[root@dm1 /]# docker network ls
NETWORK ID NAME DRIVER SCOPE
c691fa6d5ddc bridge bridge local
fad7ad56bf24 docker_gwbridge bridge local
32392bea0cf5 host host local
c6ebc70a4529 mac_net macvlan local
cae5b58f2011 mac_net1 macvlan local
08d78132536e mac_net2 macvlan local
c245d2d564f7 none null local
910414b860d4 weave weavemesh local # the weave network
4.3.3. Check the weave network address range
[root@dm1 /]# docker network inspect 910414b860d4
[
{
"Name": "weave",
"Id": "910414b860d45e3abf9d7ab0a58afa0317132826db8a163c9fb4e4ecae388562",
"Created": "2018-12-21T04:26:02.311309821-05:00",
"Scope": "local",
"Driver": "weavemesh",
"EnableIPv6": false,
"IPAM": {
"Driver": "weavemesh",
"Options": null,
"Config": [
{
"Subnet": "10.32.0.0/12" # weave address range
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {
"works.weave.multicast": "true"
},
"Labels": {}
}
]
4.4. Run containers
[root@dm1 /]# weave env
export DOCKER_HOST=unix:///var/run/weave/weave.sock
# Subsequent docker commands go through the weave proxy; to undo this later, run: eval $(weave env --restore)
[root@dm1 /]# eval $(weave env)
[root@dm1 /]# docker run -itd --name "cklw1" centos
70d02b47d3b802ac5565a7ed696590f685ac46c07bea1f7d98368bf4d784a34c
4.5. Container network analysis
4.5.1. Check the container IP:
[root@dm1 /]# docker exec -it 70d02b47d3b8 /bin/bash
[root@cklw1 /]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
27: eth0@if28: mtu 1472 qdisc noqueue state UP group default
link/ether 02:42:0a:08:5f:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.8.95.2/24 brd 10.8.95.255 scope global eth0 # the default connection to the docker0 bridge (whose range was changed by the flannel setup above)
valid_lft forever preferred_lft forever
29: ethwe@if30: mtu 1376 qdisc noqueue state UP group default
link/ether 26:fd:9a:f7:94:8e brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.32.0.1/12 brd 10.47.255.255 scope global ethwe # interface attached to the host's weave network
valid_lft forever preferred_lft forever
4.5.2. Check the host network
[root@dm1 /]# ip link show
....
30: vethwepl22309@if29: mtu 1376 qdisc noqueue master weave state UP mode DEFAULT group default
link/ether 26:56:9d:93:7f:7e brd ff:ff:ff:ff:ff:ff link-netnsid 0 # the host-side interface paired with the container's ethwe
4.5.3. Run a second container
[root@dm1 /]# docker run -itd --name "cklw2" centos
bd3273f65ae2a73035a2851e8b7f79ecaccaef1fc276112789ef0df52d22afed
Test communication between the containers:
[root@dm1 /]# docker exec -it bd3273f65ae2 /bin/bash
[root@cklw2 /]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
31: eth0@if32: mtu 1472 qdisc noqueue state UP group default
link/ether 02:42:0a:08:5f:03 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.8.95.3/24 brd 10.8.95.255 scope global eth0 # cklw2's IP address
valid_lft forever preferred_lft forever
33: ethwe@if34: mtu 1376 qdisc noqueue state UP group default
link/ether 2a:9b:c3:34:67:20 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.32.0.2/12 brd 10.47.255.255 scope global ethwe
valid_lft forever preferred_lft forever
[root@cklw2 /]# ping -c3 cklw1
PING cklw1.weave.local (10.32.0.1) 56(84) bytes of data.
64 bytes from cklw1.weave.local (10.32.0.1): icmp_seq=1 ttl=64 time=0.189 ms
64 bytes from cklw1.weave.local (10.32.0.1): icmp_seq=2 ttl=64 time=0.134 ms
64 bytes from cklw1.weave.local (10.32.0.1): icmp_seq=3 ttl=64 time=0.120 ms
--- cklw1.weave.local ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2000ms
rtt min/avg/max/mdev = 0.120/0.147/0.189/0.032 ms
4.6. Run weave and containers on dm2
4.6.1. Install weave
[root@dm2 ~]# curl -L git.io/weave -o /usr/local/bin/weave
[root@dm2 ~]# chmod a+x /usr/local/bin/weave
4.6.2. Start weave, peering with the weave router on dm1:
[root@dm2 ~]# weave launch 192.168.2.121
4.6.3. Run a container
[root@dm2 ~]# docker run -itd --name "cklw3" centos
756802eec3034ba8ef8149430363bd6de3b1802023932b4e9c2210b87c8c437e
4.6.4. Test connectivity between the containers
[root@dm2 ~]# docker exec -it 756802eec303 /bin/bash
[root@cklw3 /]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
19: eth0@if20: mtu 1472 qdisc noqueue state UP group default
link/ether 02:42:0a:08:0c:03 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.8.12.3/24 brd 10.8.12.255 scope global eth0 # the IP address obtained
valid_lft forever preferred_lft forever
21: ethwe@if22: mtu 1376 qdisc noqueue state UP group default
link/ether 52:75:7f:d1:32:70 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.44.0.0/12 brd 10.47.255.255 scope global ethwe
valid_lft forever preferred_lft forever
[root@cklw3 /]#
[root@cklw3 /]# ping -c3 cklw1 # test connectivity to cklw1
PING cklw1.weave.local (10.32.0.1) 56(84) bytes of data.
64 bytes from cklw1.weave.local (10.32.0.1): icmp_seq=1 ttl=64 time=1.96 ms
64 bytes from cklw1.weave.local (10.32.0.1): icmp_seq=2 ttl=64 time=0.514 ms
64 bytes from cklw1.weave.local (10.32.0.1): icmp_seq=3 ttl=64 time=0.538 ms
--- cklw1.weave.local ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2001ms
rtt min/avg/max/mdev = 0.514/1.006/1.966/0.678 ms
[root@cklw3 /]#
[root@cklw3 /]# ping -c3 cklw2 # test connectivity to cklw2
PING cklw2.weave.local (10.32.0.2) 56(84) bytes of data.
64 bytes from cklw2.weave.local (10.32.0.2): icmp_seq=1 ttl=64 time=1.67 ms
64 bytes from cklw2.weave.local (10.32.0.2): icmp_seq=2 ttl=64 time=0.849 ms
64 bytes from cklw2.weave.local (10.32.0.2): icmp_seq=3 ttl=64 time=0.604 ms
--- cklw2.weave.local ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2001ms
rtt min/avg/max/mdev = 0.604/1.041/1.672/0.458 ms
[root@cklw3 /]#
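At this point it is worth confirming that the two weave routers actually peered with each other; the weave CLI has status subcommands for this (standard weave commands, output not reproduced here):
# weave status peers
# weave status connections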
4.7. Assigning weave address ranges
By default weave uses 10.32.0.0/12, i.e. 10.32.0.0 through 10.47.255.255. To isolate groups of containers from each other, a subnet can be assigned per container as follows.
4.7.1. Run containers with an assigned IP range
[root@dm1 /]# docker run -itd -e WEAVE_CIDR=net:10.32.3.0/24 --name "cklwe1" centos
7a4b56c6c3f602928b76b47d6ccf48457cab09f0e4532699b27c430a764fff3e
[root@dm1 /]# docker run -itd -e WEAVE_CIDR=net:10.32.4.0/24 --name "cklwe2" centos
56f699161d30e78c398d87d033c221d22cc6dfee7e6813023a79a4a31c7d69a8
4.7.2. Test connectivity between the containers
[root@dm1 /]# docker exec -it 7a4b56c6c3f6 /bin/bash
[root@cklwe1 /]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
60: eth0@if61: mtu 1472 qdisc noqueue state UP group default
link/ether 02:42:0a:08:5f:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.8.95.2/24 brd 10.8.95.255 scope global eth0
valid_lft forever preferred_lft forever
62: ethwe@if63: mtu 1376 qdisc noqueue state UP group default
link/ether 26:8b:66:b5:2d:7e brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 10.32.3.1/24 brd 10.32.3.255 scope global ethwe
valid_lft forever preferred_lft forever
[root@cklwe1 /]#
[root@cklwe1 /]# ping 10.32.4.1
PING 10.32.4.1 (10.32.4.1) 56(84) bytes of data.
^C
--- 10.32.4.1 ping statistics ---
3 packets transmitted, 0 received, 100% packet loss, time 1999ms
[root@cklwe1 /]# ping cklwe2
PING cklwe2.weave.local (10.32.4.1) 56(84) bytes of data.
^C
--- cklwe2.weave.local ping statistics ---
4 packets transmitted, 0 received, 100% packet loss, time 2999ms # containers placed in different weave subnets cannot reach each other, so the isolation works
V. Calico networks
192.168.2.120 etcd
192.168.2.121 dm1
192.168.2.122 dm2
5.1. Use the etcd already configured on 192.168.2.120 (see the flannel section)
5.1.1. Modify dm1
[root@dm1 /]# vim /etc/systemd/system/docker.service.d/10-machine.conf
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --storage-driver overlay2 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=generic --cluster-store=etcd://192.168.2.120:2379
Environment=
5.1.2. Modify dm2
[root@dm2 ~]# vim /etc/systemd/system/docker.service.d/10-machine.conf
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --storage-driver overlay2 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=generic --cluster-store=etcd://192.168.2.120:2379
Environment=
5.2. Install Calico
5.2.1. Install calicoctl on dm1
[root@dm1 /]# wget https://github.com/projectcalico/calico-containers/releases/download/v3.4.0/calicoctl
[root@dm1 /]# chmod +x calicoctl
[root@dm1 /]# mv calicoctl /usr/local/bin/
5.2.2. Install calicoctl on dm2
[root@dm2 ~]# wget https://github.com/projectcalico/calico-containers/releases/download/v3.4.0/calicoctl
[root@dm2 ~]# chmod +x calicoctl
[root@dm2 ~]# mv calicoctl /usr/local/bin/
5.2.3. Add the calico configuration file on dm1
[root@dm1 /]# mkdir /etc/calico
[root@dm1 /]# vim /etc/calico/calicoctl.cfg
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  etcdEndpoints: http://192.168.2.120:2379
5.2.4. Start calico on dm1
[root@dm1 /]# calicoctl node run
Running command to load modules: modprobe -a xt_set ip6_tables
Enabling IPv4 forwarding
Enabling IPv6 forwarding
Increasing conntrack limit
Removing old calico-node container (if running).
Running the following command to start calico-node:
docker run --net=host --privileged --name=calico-node -d --restart=always -e NODENAME=dm1 -e CALICO_NETWORKING_BACKEND=bird -e CALICO_LIBNETWORK_ENABLED=true -e ETCD_ENDPOINTS=http://192.168.2.120:2379 -v /var/log/calico:/var/log/calico -v /var/run/calico:/var/run/calico -v /var/lib/calico:/var/lib/calico -v /lib/modules:/lib/modules -v /run:/run -v /run/docker/plugins:/run/docker/plugins -v /var/run/docker.sock:/var/run/docker.sock quay.io/calico/node:latest
Image may take a short time to download if it is not available locally.
Container started, checking progress logs.
2018-12-24 08:56:01.781 [INFO][10] startup.go 264: Early log level set to info
2018-12-24 08:56:01.781 [INFO][10] startup.go 280: Using NODENAME environment for node name
2018-12-24 08:56:01.781 [INFO][10] startup.go 292: Determined node name: dm1
2018-12-24 08:56:01.783 [INFO][10] startup.go 105: Skipping datastore connection test
2018-12-24 08:56:01.786 [INFO][10] startup.go 365: Building new node resource Name="dm1"
2018-12-24 08:56:01.786 [INFO][10] startup.go 380: Initialize BGP data
2018-12-24 08:56:01.787 [INFO][10] startup.go 582: Using autodetected IPv4 address on interface ens32: 192.168.2.121/24
2018-12-24 08:56:01.787 [INFO][10] startup.go 450: Node IPv4 changed, will check for conflicts
2018-12-24 08:56:01.789 [INFO][10] startup.go 645: No AS number configured on node resource, using global value
2018-12-24 08:56:01.797 [INFO][10] startup.go 534: CALICO_IPV4POOL_NAT_OUTGOING is true (defaulted) through environment variable
2018-12-24 08:56:01.797 [INFO][10] startup.go 797: Ensure default IPv4 pool is created. IPIP mode:
2018-12-24 08:56:01.800 [INFO][10] startup.go 807: Created default IPv4 pool (192.168.0.0/16) with NAT outgoing true. IPIP mode:
2018-12-24 08:56:01.801 [INFO][10] startup.go 534: FELIX_IPV6SUPPORT is true (defaulted) through environment variable
2018-12-24 08:56:01.801 [INFO][10] startup.go 764: IPv6 supported on this platform: true
2018-12-24 08:56:01.801 [INFO][10] startup.go 534: CALICO_IPV6POOL_NAT_OUTGOING is false (defaulted) through environment variable
2018-12-24 08:56:01.801 [INFO][10] startup.go 797: Ensure default IPv6 pool is created. IPIP mode: Never
2018-12-24 08:56:01.804 [INFO][10] startup.go 807: Created default IPv6 pool (fd3b:4e30:d002::/48) with NAT outgoing false. IPIP mode: Never
2018-12-24 08:56:01.812 [INFO][10] startup.go 189: Using node name: dm1
Starting libnetwork service
Calico node started successfully
5.2.5. Add the calico configuration file on dm2
[root@dm2 ~]# mkdir /etc/calico
[root@dm2 ~]# vim /etc/calico/calicoctl.cfg
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  etcdEndpoints: http://192.168.2.120:2379
5.2.6. Start calico on dm2
[root@dm2 ~]# calicoctl node run
Running command to load modules: modprobe -a xt_set ip6_tables
Enabling IPv4 forwarding
Enabling IPv6 forwarding
Increasing conntrack limit
Removing old calico-node container (if running).
Running the following command to start calico-node:
docker run --net=host --privileged --name=calico-node -d --restart=always -e CALICO_NETWORKING_BACKEND=bird -e CALICO_LIBNETWORK_ENABLED=true -e ETCD_ENDPOINTS=http://192.168.2.120:2379 -e NODENAME=dm2 -v /var/log/calico:/var/log/calico -v /var/run/calico:/var/run/calico -v /var/lib/calico:/var/lib/calico -v /lib/modules:/lib/modules -v /run:/run -v /run/docker/plugins:/run/docker/plugins -v /var/run/docker.sock:/var/run/docker.sock quay.io/calico/node:latest
Image may take a short time to download if it is not available locally.
Container started, checking progress logs.
2018-12-24 08:59:24.971 [INFO][10] startup.go 264: Early log level set to info
2018-12-24 08:59:24.972 [INFO][10] startup.go 280: Using NODENAME environment for node name
2018-12-24 08:59:24.972 [INFO][10] startup.go 292: Determined node name: dm2
2018-12-24 08:59:24.975 [INFO][10] startup.go 105: Skipping datastore connection test
2018-12-24 08:59:24.977 [INFO][10] startup.go 365: Building new node resource Name="dm2"
2018-12-24 08:59:24.977 [INFO][10] startup.go 380: Initialize BGP data
2018-12-24 08:59:24.979 [INFO][10] startup.go 582: Using autodetected IPv4 address on interface ens32: 192.168.2.122/24
2018-12-24 08:59:24.979 [INFO][10] startup.go 450: Node IPv4 changed, will check for conflicts
2018-12-24 08:59:24.981 [INFO][10] startup.go 645: No AS number configured on node resource, using global value
2018-12-24 08:59:24.996 [INFO][10] startup.go 189: Using node name: dm2
Starting libnetwork service
Calico node started successfully
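Before creating networks it is useful to confirm that the two calico nodes established a BGP session with each other; calicoctl ships a status subcommand for this (a standard calicoctl command, output omitted):
# calicoctl node status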
5.3. Create a calico network
[root@dm1 /]# docker network create --driver calico --ipam-driver calico-ipam ckl_calico
Error response from daemon: plugin "calico" not found
Starting with Calico 3.0 the Docker (libnetwork) deployment is no longer supported; Calico is used with Kubernetes instead.
References:
https://docs.docker.com/network/macvlan/#bridge-mode
https://www.hi-linux.com/posts/54191.html
https://www.cnblogs.com/kevingrace/p/6859114.html
https://docs.projectcalico.org/v1.5/getting-started/docker/tutorials/basic
https://www.suibian.tech/Calico-Cluster-SSL/