[Experience Sharing] Setting Up a Kubernetes Test Cluster Environment

1. Planning:
  Nodes:
  Master:
  192.168.88.71
  Node:
  192.168.88.81
  Environment:
  [root@localhost ~]# cat /etc/redhat-release

  CentOS Linux release 7.2.1511 (Core)
  [root@localhost ~]# uname -a
  Linux localhost.localdomain 3.10.0-327.el7.x86_64 #1 SMP Thu Nov 19 22:10:57 UTC 2015 x86_64 x86_64 x86_64 GNU/Linux
  Disable the firewall on all nodes:
  systemctl disable firewalld
  systemctl stop firewalld
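  A quick check that firewalld is really off on each node (the two commands should report disabled and inactive, respectively):
  systemctl is-enabled firewalld
  systemctl status firewalld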
2. Installation:
  Master:
  yum -y install etcd kubernetes flannel
  Slave:
  yum -y install docker kubernetes flannel
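  The exact package versions pulled in depend on the configured yum repositories; to see what was actually installed:
  rpm -qa | grep -Ei 'kubernetes|etcd|flannel|docker'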
3. Master configuration:
(1) etcd configuration:
  [root@localhost ~]# more /usr/lib/systemd/system/etcd.service
  [Unit]
  Description=Etcd Server
  After=network.target
  [Service]
  Type=simple
  WorkingDirectory=/var/lib/etcd/
  EnvironmentFile=-/etc/etcd/etcd.conf
  ExecStart=/usr/bin/etcd
  [Install]
  WantedBy=multi-user.target
  [root@localhost ~]# more /etc/etcd/etcd.conf
  ETCD_NAME=default
  ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
  ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
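  Once etcd is started (section 5), its health can be verified on the master with the v2 etcdctl CLI that ships with this package:
  etcdctl cluster-health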
(2) flannel configuration:
  [root@localhost etc]# more /usr/lib/systemd/system/flanneld.service
  [Unit]
  Description=Flanneld overlay address etcd agent
  After=network.target
  After=network-online.target
  Wants=network-online.target
  #After=etcd.service
  Before=docker.service
  [Service]
  Type=notify
  EnvironmentFile=/etc/sysconfig/flanneld
  #EnvironmentFile=-/etc/sysconfig/docker-network
  ExecStart=/usr/bin/flanneld -etcd-endpoints=${FLANNEL_ETCD} $FLANNEL_OPTIONS
  #ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
  #Restart=on-failure
  [Install]
  WantedBy=multi-user.target
  RequiredBy=docker.service
  [root@localhost etc]# more /etc/sysconfig/flanneld
  FLANNEL_ETCD="http://192.168.88.71:2379"
  FLANNEL_ETCD_KEY="/atomic.io/network"
  Before starting flanneld, write the overlay network configuration into etcd (flanneld reads it from the key named by FLANNEL_ETCD_KEY):
  etcdctl set /atomic.io/network/config '{"Network": "10.1.0.0/16" }'
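  To confirm the key was written:
  etcdctl get /atomic.io/network/config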
(3) kube-apiserver configuration:
  [root@localhost etc]# more /usr/lib/systemd/system/kube-apiserver.service
  [Unit]
  Description=Kubernetes API Server
  Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  After=etcd.service
  Wants=etcd.service
  [Service]
  EnvironmentFile=/etc/kubernetes/apiserver
  ExecStart=/usr/bin/kube-apiserver \
  $KUBE_API_ARGS
  Restart=on-failure
  Type=notify
  LimitNOFILE=65536
  [Install]
  WantedBy=multi-user.target
  [root@localhost kubernetes]# more /etc/kubernetes/apiserver
  KUBE_API_ARGS="--etcd_servers=http://127.0.0.1:2379 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --service-cluster-ip-range=10.1.0.0/16 --service-node-port-range=1-65535 --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ResourceQuota --logtostderr=false --log-dir=/var/log/kubernetes --v=2"
  Note that --service-cluster-ip-range here overlaps the flannel pod network (10.1.0.0/16) configured above; disjoint ranges would be safer even for a test setup.
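  With the apiserver listening on the insecure port, a minimal smoke test from any machine:
  curl http://192.168.88.71:8080/healthz
  curl http://192.168.88.71:8080/version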
(4) kube-controller-manager configuration:
  [root@localhost etc]# more /usr/lib/systemd/system/kube-controller-manager.service
  [Unit]
  Description=Kubernetes Controller Manager
  Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  After=kube-apiserver.service
  Requires=kube-apiserver.service
  [Service]
  EnvironmentFile=/etc/kubernetes/controller-manager
  ExecStart=/usr/bin/kube-controller-manager \
  $KUBE_CONTROLLER_MANAGER_ARGS
  Restart=on-failure
  LimitNOFILE=65536
  [Install]
  WantedBy=multi-user.target
  [root@localhost kubernetes]# more /etc/kubernetes/controller-manager
  KUBE_CONTROLLER_MANAGER_ARGS="--master=http://192.168.88.71:8080 --logtostderr=false --log-dir=/var/log/kubernetes --v=2"
(5) kube-scheduler configuration:
  [root@localhost etc]# more /usr/lib/systemd/system/kube-scheduler.service
  [Unit]
  Description=Kubernetes Scheduler Plugin
  Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  After=kube-apiserver.service
  Requires=kube-apiserver.service
  [Service]
  EnvironmentFile=-/etc/kubernetes/scheduler
  ExecStart=/usr/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
  Restart=on-failure
  LimitNOFILE=65536
  [Install]
  WantedBy=multi-user.target
  [root@localhost kubernetes]# more /etc/kubernetes/scheduler
  KUBE_SCHEDULER_ARGS="--master=http://192.168.88.71:8080 --logtostderr=false --log-dir=/var/log/kubernetes --v=2"
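  After all three master components are running (section 5), their health can be checked in one call against the apiserver:
  kubectl -s http://192.168.88.71:8080 get componentstatuses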
4. Slave (Node) configuration:
(1) flannel configuration:
  [root@localhost ~]# more /usr/lib/systemd/system/flanneld.service
  [Unit]
  Description=Flanneld overlay address etcd agent
  After=network.target
  After=network-online.target
  Wants=network-online.target
  #After=etcd.service
  Before=docker.service
  [Service]
  Type=notify
  EnvironmentFile=/etc/sysconfig/flanneld
  #EnvironmentFile=-/etc/sysconfig/docker-network
  ExecStart=/usr/bin/flanneld -etcd-endpoints=${FLANNEL_ETCD} $FLANNEL_OPTIONS
  #ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
  #Restart=on-failure
  [Install]
  WantedBy=multi-user.target
  RequiredBy=docker.service
  [root@localhost ~]# more /etc/sysconfig/flanneld
  FLANNEL_ETCD="http://192.168.88.71:2379"
  FLANNEL_ETCD_KEY="/atomic.io/network"
  Note:
  Before starting the flanneld service, a network configuration record must be added to etcd on the master; flanneld uses it to allocate a virtual IP subnet to the Docker daemon on each node.
  etcdctl set /atomic.io/network/config '{"Network": "10.1.0.0/16" }'
  flannel takes over the docker0 bridge, so stop the docker service first, then set the docker0 bridge IP address:
  mk-docker-opts.sh -i
  source /run/flannel/subnet.env
  ifconfig docker0 ${FLANNEL_SUBNET}
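  For reference, flanneld records its lease in /run/flannel/subnet.env. The variable names below are standard for flannel, but the actual subnet and MTU values will differ per node; something like:
  FLANNEL_NETWORK=10.1.0.0/16
  FLANNEL_SUBNET=10.1.46.1/24
  FLANNEL_MTU=1472
  FLANNEL_IPMASQ=false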
(2) kubelet configuration:
  [root@localhost ~]# more /usr/lib/systemd/system/kubelet.service
  [Unit]
  Description=Kubernetes Kubelet Server
  Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  After=docker.service
  Requires=docker.service
  [Service]
  WorkingDirectory=/var/lib/kubelet
  #EnvironmentFile=-/etc/kubernetes/config
  EnvironmentFile=-/etc/kubernetes/kubelet
  ExecStart=/usr/bin/kubelet \
  $KUBELET_ARGS
  Restart=on-failure
  [Install]
  WantedBy=multi-user.target
  [root@localhost ~]# more /etc/kubernetes/kubelet
  KUBELET_ARGS="--api-servers=http://192.168.88.71:8080 --hostname-override=192.168.88.81 --cluster_dns=10.1.0.100 --cluster_domain=cluster.local --logtostderr=false --log-dir=/var/log/kubernetes --v=2 --pod_infra_container_image=index.tenxcloud.com/google_containers/pause-amd64:3.0"
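  The unit file's WorkingDirectory and the --log-dir above must exist before the service will start; if the RPM did not already create them:
  mkdir -p /var/lib/kubelet /var/log/kubernetes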
(3) kube-proxy configuration:
  [root@localhost ~]# more /usr/lib/systemd/system/kube-proxy.service
  [Unit]
  Description=Kubernetes Kube-Proxy Server
  Documentation=https://github.com/GoogleCloudPlatform/kubernetes
  After=network.target
  Requires=network.service
  [Service]
  EnvironmentFile=-/etc/kubernetes/proxy
  ExecStart=/usr/bin/kube-proxy \
  $KUBE_PROXY_ARGS
  Restart=on-failure
  LimitNOFILE=65536
  [Install]
  WantedBy=multi-user.target
  [root@localhost ~]# more /etc/kubernetes/proxy
  KUBE_PROXY_ARGS="--master=http://192.168.88.71:8080 --logtostderr=false --log-dir=/var/log/kubernetes --v=2"
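  Once kube-proxy is running it maintains iptables NAT rules for every Service; a quick way to confirm it is doing its job:
  iptables -t nat -L -n | grep KUBE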
5. Startup:
(1) Master:
  systemctl start flanneld
  systemctl status flanneld
  systemctl start etcd
  systemctl status etcd
  systemctl start kube-apiserver.service
  systemctl status kube-apiserver.service
  systemctl start kube-controller-manager.service
  systemctl status kube-controller-manager.service
  systemctl start kube-scheduler.service
  systemctl status kube-scheduler.service
(2) Slave:
  systemctl start docker
  systemctl status docker
  systemctl start kubelet
  systemctl status kubelet
  systemctl start kube-proxy.service
  systemctl status kube-proxy.service
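  The commands above only start the services for the current boot; enable them as well so everything comes back after a reboot (flanneld is included on the slave because it was configured in section 4):
  # on the master
  for s in etcd flanneld kube-apiserver kube-controller-manager kube-scheduler; do systemctl enable $s; done
  # on the slave
  for s in flanneld docker kubelet kube-proxy; do systemctl enable $s; done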
(3) Test:
  kubectl get node
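  If the kubelet registered successfully, the node should show up as Ready; with the hostname override configured above, the output looks roughly like:
  NAME            STATUS    AGE
  192.168.88.81   Ready     1m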
6. Dashboard:
  kubectl create -f kubernetes-dashboard.yaml
  more kubernetes-dashboard.yaml
  # Copyright 2015 Google Inc. All Rights Reserved.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
  # You may obtain a copy of the License at
  #
  #    http://www.apache.org/licenses/LICENSE-2.0
  #
  # Unless required by applicable law or agreed to in writing, software
  # distributed under the License is distributed on an "AS IS" BASIS,
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.

  # Configuration to deploy the release version of the Dashboard UI.
  #
  # Example usage: kubectl create -f <this_file>
  kind: Deployment
  apiVersion: extensions/v1beta1
  metadata:
    labels:
      app: kubernetes-dashboard
      version: v1.1.1
    name: kubernetes-dashboard
    namespace: kube-system
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: kubernetes-dashboard
    template:
      metadata:
        labels:
          app: kubernetes-dashboard
      spec:
        containers:
        - name: kubernetes-dashboard
          image: index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64:v1.4.1
          imagePullPolicy: Always
          ports:
          - containerPort: 9090
            protocol: TCP
          args:
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            - --apiserver-host=http://192.168.88.71:8080
          livenessProbe:
            httpGet:
              path: /
              port: 9090
            initialDelaySeconds: 30
            timeoutSeconds: 30
  ---
  kind: Service
  apiVersion: v1
  metadata:
    labels:
      app: kubernetes-dashboard
    name: kubernetes-dashboard
    namespace: kube-system
  spec:
    type: NodePort
    ports:
    - port: 80
      targetPort: 9090
    selector:
      app: kubernetes-dashboard
  Access the dashboard through the apiserver proxy:
  http://192.168.88.71:8080/ui
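  Because the Service is of type NodePort, the dashboard is also reachable directly on the node at a randomly assigned port; to look it up:
  kubectl -s http://192.168.88.71:8080 get svc kubernetes-dashboard -n kube-system
  Then browse to http://192.168.88.81:<NodePort>/.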

