在 node1 中创建 overlay 网络 ov_net1:
[root@linux-node1 ~]# docker network create -d overlay ov_net1 #-d overlay 指定 driver 为 overlay。
[root@linux-node1 ~]# docker network ls #查看当前网络
NETWORK ID NAME DRIVER SCOPE
8eb7fd71a52c bridge bridge local
6ba20168e34f host host local
4e896f9ac4bc none null local
d9652d84d9de ov_net1 overlay global
[root@linux-node2 ~]# docker network ls #查看当前网络
NETWORK ID NAME DRIVER SCOPE
94a3bc259414 bridge bridge local
f8443f6cb8d2 host host local
2535ab8f3493 none null local
d9652d84d9de ov_net1 overlay global
node2 上也能看到 ov_net1。这是因为创建 ov_net1 时 node1 将 overlay 网络信息存入了 consul,node2 从 consul 读取到了新网络的数据。之后 ov_net1 的任何变化都会同步到 node1 和 node2。
[root@linux-node1 ~]# docker network inspect ov_net1 #查看 ov_net1 的详细信息
[
{
"Name": "ov_net1",
"Id": "d9652d84d9de6d1145c77d0254c90164b968f72f2eda4aee43d56ab03f8530ed",
"Created": "2018-04-19T21:50:29.128801226+08:00",
"Scope": "global",
"Driver": "overlay",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "10.0.0.0/24",
"Gateway": "10.0.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]
(3) 在 overlay 网络中运行容器
[root@linux-node1 ~]# docker run -itd --name bbox1 --network ov_net1 busybox
340f748b06786c0f81c3e26dd9dbd820dafcdf73baa9232f02aece8d4c89a73b
[root@linux-node1 ~]# docker exec bbox1 ip r #查看容器的网络配置
default via 172.18.0.1 dev eth1
10.0.0.0/24 dev eth0 scope link src 10.0.0.2
172.18.0.0/16 dev eth1 scope link src 172.18.0.2
[root@linux-node2 ~]# docker run -itd --name bbox2 --network ov_net1 busybox
[root@linux-node2 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
68c81b90fb86 busybox "sh" 2 days ago Up 2 days bbox2
[root@linux-node2 ~]# docker exec bbox2 ip r
default via 172.18.0.1 dev eth1
10.0.0.0/24 dev eth0 scope link src 10.0.0.3
172.18.0.0/16 dev eth1 scope link src 172.18.0.2
##bbox2 IP 为 10.0.0.3,可以直接 ping bbox1
[root@linux-node2 ~]# docker exec bbox2 ping -c 3 bbox1
PING bbox1 (10.0.0.2): 56 data bytes
64 bytes from 10.0.0.2: seq=0 ttl=64 time=154.064 ms
64 bytes from 10.0.0.2: seq=1 ttl=64 time=0.789 ms
64 bytes from 10.0.0.2: seq=2 ttl=64 time=0.539 ms
--- bbox1 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.539/51.797/154.064 ms
接下来用同样的方法创建第二个 overlay 网络 ov_net2,并在其中运行容器 bbox3:
[root@linux-node1 ~]# docker run -itd --name bbox3 --network ov_net2 busybox
946def609a7b183f68b8398b35fd3f72dc28bff47cc2ba63467f266fde297d5a
[root@linux-node1 ~]# docker exec -it bbox3 ip r
default via 172.18.0.1 dev eth1
10.0.1.0/24 dev eth0 scope link src 10.0.1.2 ##bbox3的ip为10.0.1.2
172.18.0.0/16 dev eth1 scope link src 172.18.0.4
[root@linux-node1 ~]# docker exec -it bbox3 ping -c 2 10.0.0.3 #bbox3无法ping通bbox2
PING 10.0.0.3 (10.0.0.3): 56 data bytes
^C
--- 10.0.0.3 ping statistics ---
2 packets transmitted, 0 packets received, 100% packet loss
如果要实现 bbox3 与 bbox2 通信,可以将 bbox3 也连接到 ov_net1。
[root@linux-node1 ~]# docker network connect ov_net1 bbox3
[root@linux-node1 ~]# docker exec -it bbox3 ping -c 2 10.0.0.3
PING 10.0.0.3 (10.0.0.3): 56 data bytes
64 bytes from 10.0.0.3: seq=0 ttl=64 time=34.110 ms
64 bytes from 10.0.0.3: seq=1 ttl=64 time=0.745 ms
--- 10.0.0.3 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.745/17.427/34.110 ms
docker 默认为 overlay 网络分配 24 位掩码的子网(10.0.X.0/24),所有主机共享这个 subnet,容器启动时会顺序从此空间分配 IP。当然我们也可以通过 --subnet 指定 IP 空间。
[root@linux-node1 ~]# docker network create -d overlay --subnet 10.22.1.0/24 ov_net3
a111191fa67e500015a2f3ab8166793d23f0adef4d66bfcee81166127915ff9f
[root@linux-node1 ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
8eb7fd71a52c bridge bridge local
751bd423a345 docker_gwbridge bridge local
6ba20168e34f host host local
4e896f9ac4bc none null local
d9652d84d9de ov_net1 overlay global
667cc7ef7427 ov_net2 overlay global
a111191fa67e ov_net3 overlay global