Posted by terte on 2018-4-9 11:08:27

Kubernetes 1.9 binary installation



1. Environment preparation:
m1: 192.168.122.21    software: etcd flanneld kube-apiserver kube-controller-manager kube-scheduler kube-proxy
m2: 192.168.122.22    software: etcd flanneld kube-apiserver kube-controller-manager kube-scheduler kube-proxy
m3: 192.168.122.23    software: etcd flanneld kube-apiserver kube-controller-manager kube-scheduler kube-proxy
node1: 192.168.122.24    software: flanneld kube-proxy kubelet docker
node2: 192.168.122.25    software: flanneld kube-proxy kubelet docker

2. Configure SSL certificates
(1) The CA certificates and key files to be generated:
ca-key.pem
ca.pem
kubernetes-key.pem
kubernetes.pem
kube-proxy.pem
kube-proxy-key.pem
admin.pem
admin-key.pem

(2) Which components use which certificates:
etcd: uses ca.pem, kubernetes-key.pem, kubernetes.pem
kube-apiserver: uses ca.pem, kubernetes-key.pem, kubernetes.pem
kubelet: uses ca.pem
kube-proxy: uses ca.pem, kube-proxy-key.pem, kube-proxy.pem
kubectl: uses ca.pem, admin-key.pem, admin.pem

(3) Create the CA config file

mkdir /opt/ssl
cd /opt/ssl
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json
cat > ca-config.json <<'HERE'
{
"signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
      "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
      ],
      "expiry": "8760h"
      }
    }
}
}
HERE

(4) Create the CA certificate signing request

cat > ca-csr.json << 'HERE'
{
"CN": "kubernetes",
"key": {
    "algo": "rsa",
    "size": 2048
},
"names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
]
}
HERE
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
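
Before moving on it's worth sanity-checking the CA that was just issued (assumes openssl is installed; cfssl certinfo -cert ca.pem also works):

openssl x509 -noout -text -in ca.pem | grep -A 2 Validity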

(5) Create the Kubernetes certificate

cat > kubernetes-csr.json << 'HERE'
{
    "CN": "kubernetes",
    "hosts": [
      "127.0.0.1",
      "192.168.122.21",
      "192.168.122.22",
      "192.168.122.23",
      "192.168.122.24",
      "192.168.122.25",
      "192.168.122.100",
      "172.21.0.1",
      "172.20.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
      "algo": "rsa",
      "size": 2048
    },
    "names": [
      {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "System"
      }
    ]
}
HERE
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
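
With this many SAN entries it is easy to lose one, so confirm they all made it into the issued certificate (openssl assumed):

openssl x509 -noout -text -in kubernetes.pem | grep -A 1 'Subject Alternative Name'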

(6) Create the admin certificate

cat > admin-csr.json << 'HERE'
{
"CN": "admin",
"hosts": [],
"key": {
    "algo": "rsa",
    "size": 2048
},
"names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
]
}
HERE
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

(7) Create the kube-proxy certificate

cat > kube-proxy-csr.json << 'HERE'
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
    "algo": "rsa",
    "size": 2048
},
"names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
]
}
HERE
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

(8) Distribute the certificates
mkdir -p /etc/kubernetes/ssl
cp *.pem /etc/kubernetes/ssl
# copy to m2, m3, node1 and node2 (assumes root SSH access; adjust as needed)
for h in 192.168.122.22 192.168.122.23 192.168.122.24 192.168.122.25; do
    ssh root@${h} mkdir -p /etc/kubernetes
    scp -r /etc/kubernetes/ssl root@${h}:/etc/kubernetes/
done

3. etcd servers
(1)M1

mkdir /var/lib/etcd
cat > /usr/lib/systemd/system/etcd.service << 'HERE'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
--name 'etcd-host1' \
--cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
--initial-advertise-peer-urls https://192.168.122.21:2380 \
--listen-peer-urls https://192.168.122.21:2380 \
--listen-client-urls https://192.168.122.21:2379,http://127.0.0.1:2379 \
--advertise-client-urls https://192.168.122.21:2379 \
--initial-cluster-token etcd-cluster-0 \
--initial-cluster etcd-host1=https://192.168.122.21:2380,etcd-host2=https://192.168.122.22:2380,etcd-host3=https://192.168.122.23:2380 \
--initial-cluster-state new \
--data-dir /var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE

(2)M2

mkdir /var/lib/etcd
cat > /usr/lib/systemd/system/etcd.service << 'HERE'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
--name 'etcd-host2' \
--cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
--initial-advertise-peer-urls https://192.168.122.22:2380 \
--listen-peer-urls https://192.168.122.22:2380 \
--listen-client-urls https://192.168.122.22:2379,http://127.0.0.1:2379 \
--advertise-client-urls https://192.168.122.22:2379 \
--initial-cluster-token etcd-cluster-0 \
--initial-cluster etcd-host1=https://192.168.122.21:2380,etcd-host2=https://192.168.122.22:2380,etcd-host3=https://192.168.122.23:2380 \
--initial-cluster-state new \
--data-dir /var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE

(3)M3

mkdir /var/lib/etcd
cat > /usr/lib/systemd/system/etcd.service << 'HERE'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
--name 'etcd-host3' \
--cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
--initial-advertise-peer-urls https://192.168.122.23:2380 \
--listen-peer-urls https://192.168.122.23:2380 \
--listen-client-urls https://192.168.122.23:2379,http://127.0.0.1:2379 \
--advertise-client-urls https://192.168.122.23:2379 \
--initial-cluster-token etcd-cluster-0 \
--initial-cluster etcd-host1=https://192.168.122.21:2380,etcd-host2=https://192.168.122.22:2380,etcd-host3=https://192.168.122.23:2380 \
--initial-cluster-state new \
--data-dir /var/lib/etcd

Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE

(4)M1+M2+M3

systemctl stop firewalld && systemctl daemon-reload && systemctl restart etcd && systemctl status etcd
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem --endpoints "https://192.168.122.21:2379,https://192.168.122.22:2379,https://192.168.122.23:2379" cluster-health
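
cluster-health only proves quorum; a quick write/read round-trip with a throwaway key (a sketch using the v2 API that this etcdctl invocation already speaks; remove the key afterwards with rm) confirms the cluster actually stores data:

etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem --endpoints "https://192.168.122.21:2379" set /sanity ok
etcdctl --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem --endpoints "https://192.168.122.22:2379" get /sanity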

4. Install kube-apiserver
(1)M1

cat > /usr/lib/systemd/system/kube-apiserver.service << 'HERE'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--advertise-address=192.168.122.21 \
--bind-address=192.168.122.21 \
--insecure-bind-address=127.0.0.1 \
--kubelet-https=true \
--runtime-config=rbac.authorization.k8s.io/v1beta1 \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=172.21.0.0/16 \
--service-node-port-range=3000-9000 \
--etcd-servers=https://192.168.122.21:2379,https://192.168.122.22:2379,https://192.168.122.23:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/lib/audit.log \
--event-ttl=1h \
--token-auth-file=/etc/kubernetes/token.csv \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
--etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
--v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE
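
Note that the unit points --token-auth-file at /etc/kubernetes/token.csv, which this post never shows being created. A minimal sketch, assuming the standard token,user,uid,"groups" CSV format (generate once, then copy the same file to m2 and m3):

# BOOTSTRAP_TOKEN is reused later when building bootstrap.kubeconfig
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
# unquoted HERE so the variable expands into the file
cat > /etc/kubernetes/token.csv << HERE
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
HERE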

(2)M2

cat > /usr/lib/systemd/system/kube-apiserver.service << 'HERE'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--advertise-address=192.168.122.22 \
--bind-address=192.168.122.22 \
--insecure-bind-address=127.0.0.1 \
--kubelet-https=true \
--runtime-config=rbac.authorization.k8s.io/v1beta1 \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=172.21.0.0/16 \
--service-node-port-range=3000-9000 \
--etcd-servers=https://192.168.122.21:2379,https://192.168.122.22:2379,https://192.168.122.23:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/lib/audit.log \
--event-ttl=1h \
--token-auth-file=/etc/kubernetes/token.csv \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
--etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
--v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE

(3)M3

cat > /usr/lib/systemd/system/kube-apiserver.service << 'HERE'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--advertise-address=192.168.122.23 \
--bind-address=192.168.122.23 \
--insecure-bind-address=127.0.0.1 \
--kubelet-https=true \
--runtime-config=rbac.authorization.k8s.io/v1beta1 \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=172.21.0.0/16 \
--service-node-port-range=3000-9000 \
--etcd-servers=https://192.168.122.21:2379,https://192.168.122.22:2379,https://192.168.122.23:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/lib/audit.log \
--event-ttl=1h \
--token-auth-file=/etc/kubernetes/token.csv \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
--etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
--v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE

(4)M1+M2+M3
systemctl daemon-reload && systemctl restart kube-apiserver && systemctl status kube-apiserver && systemctl enable kube-apiserver
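
Since --insecure-bind-address=127.0.0.1 leaves the default insecure port 8080 open on localhost, a quick health probe on each master is:

curl http://127.0.0.1:8080/healthz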

5. Install kube-controller-manager
(1)M1+M2+M3

cat > /usr/lib/systemd/system/kube-controller-manager.service << 'HERE'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
--address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--allocate-node-cidrs=true \
--service-cluster-ip-range=172.21.0.0/16 \
--cluster-cidr=172.20.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--leader-elect=true \
--v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
HERE

(2)M1+M2+M3
systemctl daemon-reload && systemctl restart kube-controller-manager && systemctl status kube-controller-manager && systemctl enable kube-controller-manager

6. Install kube-scheduler
(1)M1+M2+M3

cat > /usr/lib/systemd/system/kube-scheduler.service << 'HERE'
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
--address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--leader-elect=true \
--v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
HERE
systemctl daemon-reload && systemctl restart kube-scheduler && systemctl status kube-scheduler && systemctl enable kube-scheduler
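
With the scheduler and controller-manager both up, kubectl on a master should report the whole control plane healthy:

kubectl get componentstatuses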

7. Install Docker (set it up yourself, this post is long enough already, haha)
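
For the impatient, a minimal sketch for CentOS 7 (package names and versions vary by distro, adjust accordingly):

yum install -y docker
systemctl enable docker && systemctl restart docker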

8. Install kubelet + HAProxy
(1)node1

cat > /usr/lib/systemd/system/kubelet.service << 'HERE'
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--address=192.168.122.24 \
--hostname-override=192.168.122.24 \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--hairpin-mode promiscuous-bridge \
--allow-privileged=true \
--serialize-image-pulls=false \
--logtostderr=true \
--cluster-dns=172.21.0.2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
HERE

(2)node2

cat > /usr/lib/systemd/system/kubelet.service << 'HERE'
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--address=192.168.122.25 \
--hostname-override=192.168.122.25 \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--hairpin-mode promiscuous-bridge \
--allow-privileged=true \
--serialize-image-pulls=false \
--logtostderr=true \
--cluster-dns=172.21.0.2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

HERE
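
Both kubelet units reference /etc/kubernetes/bootstrap.kubeconfig, which has to be generated first. A sketch, run on a master and then copied to /etc/kubernetes/ on each node; it assumes the BOOTSTRAP_TOKEN shell variable created alongside token.csv in section 4, and points --server at the local HAProxy endpoint set up in step (3) below:

cd /etc/kubernetes
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# then: scp bootstrap.kubeconfig root@<node>:/etc/kubernetes/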

(3) Install HAProxy on every node and expose the apiservers locally at 127.0.0.1:6443

cat > /etc/haproxy/haproxy.cfg << 'HERE'
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events. This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #
    log         127.0.0.1 local3

    #      local2.*               /var/log/haproxy.log
    chroot      /var/lib/haproxy
    pidfile   /var/run/haproxy.pid
    maxconn   4000
    user      haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                  http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries               3
    timeout http-request    10s
    timeout queue         1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check         10s
    maxconn               3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
#frontend main *:6443
# stats uri /haproxy
#   acl url_static       path_beg       -i /static /images /javascript /stylesheets
#   acl url_static       path_end       -i .jpg .gif .png .css .js

#use_backend static          if url_static
#    default_backend             k8s-apiserver

#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
#backend static
#    balance   roundrobin
#    server      static 127.0.0.1:4331 check

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
#backend k8s-apiserver
listen k8s-apiserver
    bind 127.0.0.1:6443
    mode tcp                                 # TCP mode
    balance roundrobin
    server c1 192.168.122.21:6443 check      # round-robin across the three apiservers
    server c2 192.168.122.22:6443 check
    server c3 192.168.122.23:6443 check

#--------------------------------------------------------------------

listen stats
    bind 0.0.0.0:8090
    mode http
    stats refresh 30s
    stats uri /haproxy-stats
HERE

systemctl daemon-reload && systemctl restart haproxy kubelet && systemctl status kubelet && systemctl enable kubelet haproxy
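
To confirm the proxy is doing its job, the stats page configured above answers on port 8090, and the apiservers should answer through the local 6443 bind (-k because the certificate is signed by our own CA; even a 401/403 response here proves the path through HAProxy works):

curl -s http://127.0.0.1:8090/haproxy-stats | head
curl -sk https://127.0.0.1:6443/healthz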

9. Install kube-proxy
M1+M2+M3+node1+node2

cat > /usr/lib/systemd/system/kube-proxy.service << 'HERE'
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-proxy \
--bind-address=192.168.122.24 \
--hostname-override=192.168.122.24 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
--cluster-cidr=172.20.0.0/16
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
HERE

On each host, just change --bind-address=192.168.122.24 and --hostname-override=192.168.122.24 to that host's own IP.
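
/etc/kubernetes/kube-proxy.kubeconfig also has to exist before the service will start. A sketch using the kube-proxy certificate from section 2 (again pointing --server at the local HAProxy bind; then start the service the same way as the others):

kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=https://127.0.0.1:6443 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
systemctl daemon-reload && systemctl restart kube-proxy && systemctl status kube-proxy && systemctl enable kube-proxy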

10. Approve the node certificates on the master
Run on a master:
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
# approve all pending certificate requests in the cluster
kubectl get csr
kubectl get csr | awk '/Pending/ {print $1}' | xargs kubectl certificate approve
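
Once the CSRs are approved the kubelets fetch their certificates and the nodes register; verify with:

kubectl get nodes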

After that you can create pods.
Note:
(1) Change the IP in ~/.kube/config
(2) Change the IP in /etc/kubernetes/bootstrap.kubeconfig
(3) Change the IP in /etc/kubernetes/kube-proxy.kubeconfig

