
[Experience Share] Installing Kubernetes 1.9 from Binaries


1. Environment preparation:
m1: 192.168.122.21    software: etcd flanneld kube-apiserver kube-controller-manager kube-scheduler kube-proxy
m2: 192.168.122.22    software: etcd flanneld kube-apiserver kube-controller-manager kube-scheduler kube-proxy
m3: 192.168.122.23    software: etcd flanneld kube-apiserver kube-controller-manager kube-scheduler kube-proxy
node1: 192.168.122.24    software: flanneld kube-proxy kubelet docker
node2: 192.168.122.25    software: flanneld kube-proxy kubelet docker

2. Configure SSL
(1) The CA certificates and key files to be generated:
ca-key.pem
ca.pem
kubernetes-key.pem
kubernetes.pem
kube-proxy.pem
kube-proxy-key.pem
admin.pem
admin-key.pem

(2) Components that use these certificates:
etcd: ca.pem, kubernetes-key.pem, kubernetes.pem
kube-apiserver: ca.pem, kubernetes-key.pem, kubernetes.pem
kubelet: ca.pem
kube-proxy: ca.pem, kube-proxy-key.pem, kube-proxy.pem
kubectl: ca.pem, admin-key.pem, admin.pem

(3) Create the CA config file

mkdir /opt/ssl
cd /opt/ssl
# optional: dump cfssl's default templates for reference
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json
cat > ca-config.json <<'HERE'
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}
HERE

(4) Create the CA certificate signing request

cat > ca-csr.json << 'HERE'
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
HERE
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
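
You can sanity-check the freshly generated CA before signing anything with it (a quick inspection using cfssl-certinfo, which ships alongside cfssl):

cfssl-certinfo -cert ca.pem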

(5) Create the Kubernetes certificate (the hosts list must include every address the apiserver can be reached at: all master and node IPs, the extra address 192.168.122.100, the first service IP 172.21.0.1 of the 172.21.0.0/16 service CIDR, and the service DNS names)

cat > kubernetes-csr.json << 'HERE'
{
    "CN": "kubernetes",
    "hosts": [
      "127.0.0.1",
      "192.168.122.21",
      "192.168.122.22",
      "192.168.122.23",
      "192.168.122.24",
      "192.168.122.25",
      "192.168.122.100",
      "172.21.0.1",
      "172.20.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
HERE
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes

(6) Create the admin certificate

cat > admin-csr.json << 'HERE'
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
HERE
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

(7) Create the kube-proxy certificate

cat > kube-proxy-csr.json << 'HERE'
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
HERE
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy

(8) Distribute the certificates
mkdir -p /etc/kubernetes/ssl
cp *.pem /etc/kubernetes/ssl
# copy the directory to m2, m3, node1 and node2
for host in 192.168.122.22 192.168.122.23 192.168.122.24 192.168.122.25; do
  scp -r /etc/kubernetes/ssl "$host":/etc/kubernetes/
done

3. etcd servers
(1)M1

mkdir /var/lib/etcd
cat > /usr/lib/systemd/system/etcd.service << 'HERE'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --name 'etcd-host1' \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.122.21:2380 \
  --listen-peer-urls https://192.168.122.21:2380 \
  --listen-client-urls https://192.168.122.21:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.122.21:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster etcd-host1=https://192.168.122.21:2380,etcd-host2=https://192.168.122.22:2380,etcd-host3=https://192.168.122.23:2380 \
  --initial-cluster-state new \
  --data-dir /var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE

(2)M2

mkdir /var/lib/etcd
cat > /usr/lib/systemd/system/etcd.service << 'HERE'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --name 'etcd-host2' \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.122.22:2380 \
  --listen-peer-urls https://192.168.122.22:2380 \
  --listen-client-urls https://192.168.122.22:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.122.22:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster etcd-host1=https://192.168.122.21:2380,etcd-host2=https://192.168.122.22:2380,etcd-host3=https://192.168.122.23:2380 \
  --initial-cluster-state new \
  --data-dir /var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE

(3)M3

mkdir /var/lib/etcd
cat > /usr/lib/systemd/system/etcd.service << 'HERE'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --name 'etcd-host3' \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls https://192.168.122.23:2380 \
  --listen-peer-urls https://192.168.122.23:2380 \
  --listen-client-urls https://192.168.122.23:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.122.23:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster etcd-host1=https://192.168.122.21:2380,etcd-host2=https://192.168.122.22:2380,etcd-host3=https://192.168.122.23:2380 \
  --initial-cluster-state new \
  --data-dir /var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE

(4)M1+M2+M3

systemctl stop firewalld && systemctl daemon-reload && systemctl restart etcd && systemctl enable etcd && systemctl status etcd
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem --endpoints "https://192.168.122.21:2379,https://192.168.122.22:2379,https://192.168.122.23:2379" cluster-health
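
A second quick check is to list the members the cluster formed (same TLS flags; this uses the same v2 etcdctl interface as the cluster-health check above):

etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem --endpoints "https://192.168.122.21:2379,https://192.168.122.22:2379,https://192.168.122.23:2379" member list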

4. Install kube-apiserver
(1)M1

cat > /usr/lib/systemd/system/kube-apiserver.service << 'HERE'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--advertise-address=192.168.122.21 \
--bind-address=192.168.122.21 \
--insecure-bind-address=127.0.0.1 \
--kubelet-https=true \
--runtime-config=rbac.authorization.k8s.io/v1beta1 \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=172.21.0.0/16 \
--service-node-port-range=3000-9000 \
--etcd-servers=https://192.168.122.21:2379,https://192.168.122.22:2379,https://192.168.122.23:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/lib/audit.log \
--event-ttl=1h \
--token-auth-file=/etc/kubernetes/token.csv  \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
--etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
--v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
HERE
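
The unit above references /etc/kubernetes/token.csv, which must exist before kube-apiserver will start. A minimal sketch of how such a token file is typically generated (an assumption, not shown in the original post; the kubelet-bootstrap user name matches the clusterrolebinding created in step 10, and the same file should be copied to m2 and m3):

# generate a random bootstrap token and write token.csv
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /etc/kubernetes/token.csv << HERE
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
HERE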

(2)M2

cat > /usr/lib/systemd/system/kube-apiserver.service << 'HERE'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--advertise-address=192.168.122.22 \
--bind-address=192.168.122.22 \
--insecure-bind-address=127.0.0.1 \
--kubelet-https=true \
--runtime-config=rbac.authorization.k8s.io/v1beta1 \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=172.21.0.0/16 \
--service-node-port-range=3000-9000 \
--etcd-servers=https://192.168.122.21:2379,https://192.168.122.22:2379,https://192.168.122.23:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/lib/audit.log \
--event-ttl=1h \
--token-auth-file=/etc/kubernetes/token.csv  \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
--etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
--v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
HERE

(3)M3

cat > /usr/lib/systemd/system/kube-apiserver.service << 'HERE'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--advertise-address=192.168.122.23 \
--bind-address=192.168.122.23 \
--insecure-bind-address=127.0.0.1 \
--kubelet-https=true \
--runtime-config=rbac.authorization.k8s.io/v1beta1 \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=172.21.0.0/16 \
--service-node-port-range=3000-9000 \
--etcd-servers=https://192.168.122.21:2379,https://192.168.122.22:2379,https://192.168.122.23:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/lib/audit.log \
--event-ttl=1h \
--token-auth-file=/etc/kubernetes/token.csv  \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
--etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
--v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
HERE

(4)M1+M2+M3
systemctl daemon-reload && systemctl restart kube-apiserver && systemctl status kube-apiserver && systemctl enable kube-apiserver
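
If the apiservers came up cleanly, the insecure local endpoint should answer health checks (assuming the default insecure port 8080, which the unit above does not override):

curl http://127.0.0.1:8080/healthz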

5. Install kube-controller-manager
(1)M1+M2+M3

cat > /usr/lib/systemd/system/kube-controller-manager.service << 'HERE'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--allocate-node-cidrs=true \
--service-cluster-ip-range=172.21.0.0/16 \
--cluster-cidr=172.20.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--leader-elect=true \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
HERE

(2)M1+M2+M3
systemctl daemon-reload && systemctl restart kube-controller-manager && systemctl status kube-controller-manager && systemctl enable kube-controller-manager

6. Install kube-scheduler
(1)M1+M2+M3

cat > /usr/lib/systemd/system/kube-scheduler.service << 'HERE'
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--leader-elect=true \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
HERE
systemctl daemon-reload  && systemctl restart kube-scheduler && systemctl status kube-scheduler && systemctl enable kube-scheduler
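
With the apiserver, controller-manager and scheduler all running, kubectl on a master (talking to the local insecure port) should report the control plane healthy:

kubectl get componentstatuses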

7. Install Docker (do this one yourself, the post is already long enough, haha)

8. Install kubelet + HAProxy
(1)node1

mkdir -p /var/lib/kubelet
cat > /usr/lib/systemd/system/kubelet.service << 'HERE'
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--address=192.168.122.24 \
--hostname-override=192.168.122.24 \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--hairpin-mode promiscuous-bridge \
--allow-privileged=true \
--serialize-image-pulls=false \
--logtostderr=true \
--cluster-dns=172.21.0.2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
HERE

(2)node2

mkdir -p /var/lib/kubelet
cat > /usr/lib/systemd/system/kubelet.service << 'HERE'
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--address=192.168.122.25 \
--hostname-override=192.168.122.25 \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--hairpin-mode promiscuous-bridge \
--allow-privileged=true \
--serialize-image-pulls=false \
--logtostderr=true \
--cluster-dns=172.21.0.2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
HERE

(3) Install HAProxy on every node, exposing the apiservers locally at 127.0.0.1:6443

cat > /etc/haproxy/haproxy.cfg << 'HERE'
#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #
    log         127.0.0.1 local3

    #      local2.*                 /var/log/haproxy.log
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
#frontend  main *:6443
# stats uri /haproxy
#   acl url_static       path_beg       -i /static /images /javascript /stylesheets
#   acl url_static       path_end       -i .jpg .gif .png .css .js

  #  use_backend static          if url_static
#    default_backend             k8s-apiserver

#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
#backend static
#    balance     roundrobin
#    server      static 127.0.0.1:4331 check

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
#backend k8s-apiserver
listen k8s-apiserver
bind 127.0.0.1:6443
    mode tcp   # TCP mode
    balance     roundrobin
    server  c1 192.168.122.21:6443 check  # round-robin across the three apiservers
    server  c2 192.168.122.22:6443 check
    server  c3 192.168.122.23:6443 check

#--------------------------------------------------------------------

listen localhost 0.0.0.0:8090
      mode http  
      transparent  
      stats refresh 30s
      stats uri /haproxy-stats
HERE
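
Before restarting, it is worth validating the file with HAProxy's built-in config check:

haproxy -c -f /etc/haproxy/haproxy.cfg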

systemctl daemon-reload && systemctl restart haproxy kubelet && systemctl status kubelet && systemctl enable kubelet haproxy

9. Install kube-proxy
M1+M2+M3+node1+node2

cat > /usr/lib/systemd/system/kube-proxy.service << 'HERE'
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \
--bind-address=192.168.122.24 \
--hostname-override=192.168.122.24 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
--cluster-cidr=172.20.0.0/16
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE

On each host, just change --bind-address and --hostname-override to that host's own IP.
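
For example, on node2 (a hypothetical one-liner; adjust the IPs for each host), followed by starting the service, which the steps above never do explicitly:

sed -i 's/192.168.122.24/192.168.122.25/g' /usr/lib/systemd/system/kube-proxy.service
systemctl daemon-reload && systemctl restart kube-proxy && systemctl status kube-proxy && systemctl enable kube-proxy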

10. Approve node certificate requests on the master
Run on a master:
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
# approve every pending CSR in the cluster
kubectl get csr
kubectl get csr | awk '/Pending/ {print $1}' | xargs kubectl certificate approve
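
Once the CSRs are approved, the kubelets finish TLS bootstrapping and register themselves; verify on a master with:

kubectl get nodes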

At this point you can create pods.
Note: the apiserver IP must be adjusted per host in each of the following files:
(1)~/.kube/config
(2)/etc/kubernetes/bootstrap.kubeconfig
(3)/etc/kubernetes/kube-proxy.kubeconfig
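
These kubeconfigs are never generated in the steps above. A minimal sketch of how they are typically created with kubectl config (an assumption, not part of the original post: on the nodes the server is the local HAProxy at https://127.0.0.1:6443 and BOOTSTRAP_TOKEN is the token written to token.csv in step 4; on the masters point --server at the local apiserver instead):

# bootstrap.kubeconfig -- used by kubelet for TLS bootstrapping
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig

# kube-proxy.kubeconfig -- uses the kube-proxy client certificate from step 2
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig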


