设为首页 收藏本站
查看: 668|回复: 0

[经验分享] 在Kubernetes集群中使用calico做网络驱动的配置方法

[复制链接]

尚未签到

发表于 2018-1-4 18:00:30 | 显示全部楼层 |阅读模式
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # The location of your etcd cluster.  This uses the Service clusterIP
  # defined below.
  # 192.168.182.128 is the VM's address, i.e. the machine where the
  # Kubernetes master node runs; the same address is used in the
  # configuration below.
  etcd_endpoints: "http://192.168.182.128:6666"

  # True enables BGP networking,
  # false tells Calico to enforce
  # policy only, using native networking.
  enable_bgp: "true"

  # The CNI network configuration to
  # install on each node.
  cni_network_config: |-
    {
        "name": "k8s-pod-network",
        "type": "calico",
        "etcd_endpoints": "http://192.168.182.128:6666",
        "log_level": "info",
        "ipam": {
            "type": "calico-ipam"
        },
        "policy": {
            "type": "k8s",
            "k8s_api_root": "https://192.168.182.128:6443",
            "k8s_auth_token": ""
        },
        "kubernetes": {
            "kubeconfig": "/etc/kubernetes/kubelet.conf"
        }
    }

  # The default IP Pool to be created
  # for the cluster.
  # Pod IP addresses will be assigned from this pool.
  ippool.yaml: |
    apiVersion: v1
    kind: ipPool
    metadata:
      cidr: 10.1.0.0/16
    spec:
      ipip:
        enabled: true
      nat-outgoing: true
---

# This manifest installs the Calico etcd on the kubeadm master.  This uses a DaemonSet
# to force it to run on the master even when the master isn't schedulable, and uses
# nodeSelector to ensure it only runs on the master.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: calico-etcd
  namespace: kube-system
  labels:
    k8s-app: calico-etcd
spec:
  template:
    metadata:
      labels:
        k8s-app: calico-etcd
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: |
          [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
           {"key":"CriticalAddonsOnly", "operator":"Exists"}]
    spec:
      # Only run this pod on the master.
      nodeSelector:
        kubeadm.alpha.kubernetes.io/role: master
      hostNetwork: true
      containers:
        - name: calico-etcd
          image: k8s/etcd:v3.0.15
          env:
            # Expose the pod IP so etcd can advertise a reachable client URL.
            - name: CALICO_ETCD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          command: ["/bin/sh", "-c"]
          args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
          volumeMounts:
            - name: var-etcd
              mountPath: /var/etcd
      volumes:
        # Persist etcd data on the host across pod restarts.
        - name: var-etcd
          hostPath:
            path: /var/etcd
---

# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: calico-etcd
  name: calico-etcd
  namespace: kube-system
spec:
  # Select the calico-etcd pod running on the master.
  selector:
    k8s-app: calico-etcd

  # This ClusterIP needs to be known in advance, since we cannot rely
  # on DNS to get access to etcd.
  # NOTE(review): `clusterIP: None` makes this a headless Service; the
  # ConfigMap above reaches etcd via the master's host IP instead.
  clusterIP: None
  ports:
    - port: 6666
---

# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: |
          [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
           {"key":"CriticalAddonsOnly", "operator":"Exists"}]
    spec:
      hostNetwork: true
      containers:
        # Runs calico/node container on each Kubernetes node.  This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v1.0.2
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Enable BGP.  Disable to enforce policy only.
            - name: CALICO_NETWORKING
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: enable_bgp
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Don't configure a default pool.  This is done by the Job
            # below.
            - name: NO_DEFAULT_POOLS
              value: "true"
            # Auto-detect the BGP IP address.
            - name: IP
              value: ""
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v1.5.5
          command: ["/install-cni.sh"]
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
---

# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: |
          [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
           {"key":"CriticalAddonsOnly", "operator":"Exists"}]
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      containers:
        - name: calico-policy-controller
          image: calico/kube-policy-controller:v0.5.2
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The location of the Kubernetes API.  Use the default Kubernetes
            # service for API access.
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Since we're running in the host namespace and might not have KubeDNS
            # access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"
---

# This manifest deploys a Job which performs one time
# configuration of Calico
apiVersion: batch/v1
kind: Job
metadata:
  name: configure-calico
  namespace: kube-system
  labels:
    k8s-app: calico
spec:
  template:
    metadata:
      name: configure-calico
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: |
          [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
           {"key":"CriticalAddonsOnly", "operator":"Exists"}]
    spec:
      hostNetwork: true
      restartPolicy: OnFailure
      containers:
        # Writes basic configuration to datastore.
        - name: configure-calico
          image: calico/ctl:v1.0.2
          args:
            - apply
            - -f
            - /etc/config/calico/ippool.yaml
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
      volumes:
        # Mounts the ippool.yaml key from the calico-config ConfigMap at
        # /etc/config/calico/ippool.yaml for calicoctl to apply.
        - name: config-volume
          configMap:
            name: calico-config
            items:
              - key: ippool.yaml
                path: calico/ippool.yaml

运维网声明 1、欢迎大家加入本站运维交流群:群②:261659950 群⑤:202807635 群⑦870801961 群⑧679858003
2、本站所有主题由该帖子作者发表,该帖子作者与运维网享有帖子相关版权
3、所有作品的著作权均归原作者享有,请您和我们一样尊重他人的著作权等合法权益。如果您对作品感到满意,请购买正版
4、禁止制作、复制、发布和传播具有反动、淫秽、色情、暴力、凶杀等内容的信息,一经发现立即删除。若您因此触犯法律,一切后果自负,我们对此不承担任何责任
5、所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其内容的准确性、可靠性、正当性、安全性、合法性等负责,亦不承担任何法律责任
6、所有作品仅供您个人学习、研究或欣赏,不得用于商业或者其他用途,否则,一切后果均由您自己承担,我们对此不承担任何法律责任
7、如涉及侵犯版权等问题,请您及时通知我们,我们将立即采取措施予以解决
8、联系人Email:admin@iyunv.com 网址:www.yunweiku.com

所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其承担任何法律责任,如涉及侵犯版权等问题,请您及时通知我们,我们将立即处理,联系人Email:kefu@iyunv.com,QQ:1061981298 本贴地址:https://www.yunweiku.com/thread-431610-1-1.html 上篇帖子: 大卡尔测试行业从业者,致力于用技术手段提高测试生产力 下篇帖子: 手工安装kubernetes
您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

扫码加入运维网微信交流群X

扫码加入运维网微信交流群

扫描二维码加入运维网微信交流群,最新一手资源尽在官方微信交流群!快快加入我们吧...

扫描微信二维码查看详情

客服E-mail:kefu@iyunv.com 客服QQ:1061981298


QQ群⑦:运维网交流群⑦ QQ群⑧:运维网交流群⑧ k8s群:运维网kubernetes交流群


提醒:禁止发布任何违反国家法律、法规的言论与图片等内容;本站内容均来自个人观点与网络等信息,非本站认同之观点.


本站大部分资源是网友从网上搜集分享而来,其版权均归原作者及其网站所有,我们尊重他人的合法权益,如有内容侵犯您的合法权益,请及时与我们联系进行核实删除!



合作伙伴: 青云cloud

快速回复 返回顶部 返回列表