Posted by jxwjq on 2018-9-16 06:35:49

Kubernetes 1.8 install script, Master part - BravePro

#!/bin/bash  
#********************************************************************
  
#Author:      bravewang
  
#QQ:          6142553
  
#Blog:        http://brave666.blog.51cto.com/
  
#Description: Kubernetes Master install
  
#Date:      2017-11-14
  
#********************************************************************
  
export host1=192.168.2.11
  
export host2=192.168.2.12
  
export host3=192.168.2.13
  
export host4=192.168.2.14
  
export host5=192.168.2.15
  
export zhuji="$host1 $host2 $host3"
  

  
cat > admin-csr.json <<EOF
  
{
  
"CN": "admin",
  
"hosts": [],
  
"key": {
  
    "algo": "rsa",
  
    "size": 2048
  
},
  
"names": [
  
    {
  
      "C": "CN",
  
      "ST": "BeiJing",
  
      "L": "BeiJing",
  
      "O": "system:masters",
  
      "OU": "System"
  
    }
  
]
  
}
  
EOF
  

  
cat > k8s-gencert.json <<EOF
  
{
  
"signing": {
  
    "default": {
  
      "expiry": "87600h"
  
    },
  
    "profiles": {
  
      "kubernetes": {
  
      "usages": [
  
            "signing",
  
            "key encipherment",
  
            "server auth",
  
            "client auth"
  
      ],
  
      "expiry": "87600h"
  
      }
  
    }
  
}
  
}
  
EOF
  

  
cat > k8s-root-ca-csr.json <<EOF
  
{
  
"CN": "kubernetes",
  
"key": {
  
    "algo": "rsa",
  
    "size": 4096
  
},
  
"names": [
  
    {
  
      "C": "CN",
  
      "ST": "BeiJing",
  
      "L": "BeiJing",
  
      "O": "k8s",
  
      "OU": "System"
  
    }
  
]
  
}
  
EOF
  

  
cat > kube-proxy-csr.json <<EOF
  
{
  
"CN": "system:kube-proxy",
  
"hosts": [],
  
"key": {
  
    "algo": "rsa",
  
    "size": 2048
  
},
  
"names": [
  
    {
  
      "C": "CN",
  
      "ST": "BeiJing",
  
      "L": "BeiJing",
  
      "O": "k8s",
  
      "OU": "System"
  
    }
  
]
  
}
  
EOF
  

  
cat > kubernetes-csr.json <<EOF
  
{
  
    "CN": "kubernetes",
  
    "hosts": [
  
      "127.0.0.1",
  
      "10.254.0.1",
  
      "$host1",
  
      "$host2",
  
      "$host3",
  
      "$host4",
  
      "$host5",
  
      "localhost",
  
      "kubernetes",
  
      "kubernetes.default",
  
      "kubernetes.default.svc",
  
      "kubernetes.default.svc.cluster",
  
      "kubernetes.default.svc.cluster.local"
  
    ],
  
    "key": {
  
      "algo": "rsa",
  
      "size": 2048
  
    },
  
    "names": [
  
      {
  
            "C": "CN",
  
            "ST": "BeiJing",
  
            "L": "BeiJing",
  
            "O": "k8s",
  
            "OU": "System"
  
      }
  
    ]
  
}
  
EOF
  

  
# Generate certificates
  

  
cfssl gencert --initca=true k8s-root-ca-csr.json | cfssljson --bare k8s-root-ca
  

  
for targetName in kubernetes admin kube-proxy; do
  
cfssl gencert --ca k8s-root-ca.pem --ca-key k8s-root-ca-key.pem --config k8s-gencert.json --profile kubernetes $targetName-csr.json | cfssljson --bare $targetName
  
done
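
# (Optional sanity check, not part of the original script; assumes openssl is
#  installed on this host.) Print subject and validity dates for each generated
#  certificate so an obvious cfssl failure is caught before anything is distributed.
for targetName in k8s-root-ca kubernetes admin kube-proxy; do
    openssl x509 -in ${targetName}.pem -noout -subject -dates
done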
  

  
# Generate configuration
  
export KUBE_APISERVER="https://127.0.0.1:6443"
  
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
  
echo "Tokne: ${BOOTSTRAP_TOKEN}"
  

  
cat > token.csv <<EOF
  
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
  
EOF
  

  
echo "Create kubelet bootstrapping kubeconfig..."
  
kubectl config set-cluster kubernetes \
  
--certificate-authority=k8s-root-ca.pem \
  
--embed-certs=true \
  
--server=${KUBE_APISERVER} \
  
--kubeconfig=bootstrap.kubeconfig
  
kubectl config set-credentials kubelet-bootstrap \
  
--token=${BOOTSTRAP_TOKEN} \
  
--kubeconfig=bootstrap.kubeconfig
  
kubectl config set-context default \
  
--cluster=kubernetes \
  
--user=kubelet-bootstrap \
  
--kubeconfig=bootstrap.kubeconfig
  
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
  

  
echo "Create kube-proxy kubeconfig..."
  
kubectl config set-cluster kubernetes \
  
--certificate-authority=k8s-root-ca.pem \
  
--embed-certs=true \
  
--server=${KUBE_APISERVER} \
  
--kubeconfig=kube-proxy.kubeconfig
  
kubectl config set-credentials kube-proxy \
  
--client-certificate=kube-proxy.pem \
  
--client-key=kube-proxy-key.pem \
  
--embed-certs=true \
  
--kubeconfig=kube-proxy.kubeconfig
  
kubectl config set-context default \
  
--cluster=kubernetes \
  
--user=kube-proxy \
  
--kubeconfig=kube-proxy.kubeconfig
  
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
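
# (Optional, not in the original post.) Read-only check that both kubeconfig
# files were written with the expected cluster, user and context entries;
# kubectl only displays the files here, nothing is modified.
kubectl config view --kubeconfig=bootstrap.kubeconfig
kubectl config view --kubeconfig=kube-proxy.kubeconfig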
  

  
# Generate the advanced audit policy
  
cat > audit-policy.yaml <<EOF
  
# Log all requests at the Metadata level.
  
apiVersion: audit.k8s.io/v1beta1
  
kind: Policy
  
rules:
  
- level: Metadata
  
EOF
  

  
# Distribute and install the RPMs
  
#for IP in $zhuji; do
  
#    scp kubernetes*.rpm root@$IP:~;
  
#    ssh root@$IP yum install -y kubernetes*.rpm
  
#done
  

  
# Distribute certificates and set permissions
  
for IP in $zhuji;do
  
ssh root@$IP hostnamectl set-hostname Master${IP##*.}
  
ssh root@$IP mkdir /etc/kubernetes/ssl
  
scp *.pem root@$IP:/etc/kubernetes/ssl
  
scp *.kubeconfig token.csv audit-policy.yaml root@$IP:/etc/kubernetes
  
ssh root@$IP chown -R kube:kube /etc/kubernetes/ssl
  
ssh root@$IP mkdir -p /var/log/kube-audit /usr/libexec/kubernetes
  
ssh root@$IP chown -R kube:kube /var/log/kube-audit /usr/libexec/kubernetes
  
ssh root@$IP chmod -R 755 /var/log/kube-audit /usr/libexec/kubernetes
  
done
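
# (Optional check, not part of the original script.) Spot-check one master to
# confirm the certificates and kubeconfig files actually landed with the
# expected ownership before the services are configured.
ssh root@$host1 ls -l /etc/kubernetes /etc/kubernetes/ssl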
  

  
cat > masterconfig <<EOF
  
###
  
# kubernetes system config
  
#
  
# The following values are used to configure various aspects of all
  
# kubernetes services, including
  
#
  
#   kube-apiserver.service
  
#   kube-controller-manager.service
  
#   kube-scheduler.service
  
#   kubelet.service
  
#   kube-proxy.service
  
# logging to stderr means we get it in the systemd journal
  
KUBE_LOGTOSTDERR="--logtostderr=true"
  

  
# journal message level, 0 is debug
  
KUBE_LOG_LEVEL="--v=2"
  

  
# Should this cluster be allowed to run privileged docker containers
  
KUBE_ALLOW_PRIV="--allow-privileged=true"
  

  
# How the controller-manager, scheduler, and proxy find the apiserver
  
KUBE_MASTER="--master=http://127.0.0.1:8080"
  
EOF
  

  
apiconf()
  
{
  
cat > apiserver${IP##*.} <<EOF
  
###
  
# kubernetes system config
  
#
  
# The following values are used to configure the kube-apiserver
  
#
  

  
# The address on the local server to listen to.
  
KUBE_API_ADDRESS="--advertise-address=$IP --insecure-bind-address=127.0.0.1 --bind-address=$IP"
  

  
# The port on the local server to listen on.
  
KUBE_API_PORT="--insecure-port=8080 --secure-port=6443"
  

  
# Port minions listen on
  
# KUBELET_PORT="--kubelet-port=10250"
  

  
# Comma separated list of nodes in the etcd cluster
  
KUBE_ETCD_SERVERS="--etcd-servers=https://$host1:2379,https://$host2:2379,https://$host3:2379"
  

  
# Address range to use for services
  
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
  

  
# default admission control policies
  
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction"
  

  
# Add your own!
  
KUBE_API_ARGS="--authorization-mode=RBAC,Node \\
  
               --anonymous-auth=false \\
  
               --kubelet-https=true \\
  
               --enable-bootstrap-token-auth \\
  
               --token-auth-file=/etc/kubernetes/token.csv \\
  
               --service-node-port-range=30000-50000 \\
  
               --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  
               --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  
               --client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \\
  
               --service-account-key-file=/etc/kubernetes/ssl/k8s-root-ca.pem \\
  
               --etcd-quorum-read=true \\
  
               --storage-backend=etcd3 \\
  
               --etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem \\
  
               --etcd-certfile=/etc/etcd/ssl/etcd.pem \\
  
               --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
  
               --enable-swagger-ui=true \\
  
               --apiserver-count=3 \\
  
               --audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
  
               --audit-log-maxage=30 \\
  
               --audit-log-maxbackup=3 \\
  
               --audit-log-maxsize=100 \\
  
               --audit-log-path=/var/log/kube-audit/audit.log \\
  
               --event-ttl=1h"
  
EOF
  
}
  

  
cat > controller-manager <<EOF
  
###
  
# The following values are used to configure the kubernetes controller-manager
  

  
# defaults from config and apiserver should be adequate
  

  
# Add your own!
  
KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 \\
  
                              --service-cluster-ip-range=10.254.0.0/16 \\
  
                              --cluster-name=kubernetes \\
  
                              --cluster-signing-cert-file=/etc/kubernetes/ssl/k8s-root-ca.pem \\
  
                              --cluster-signing-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \\
  
                              --service-account-private-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \\
  
                              --root-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \\
  
                              --leader-elect=true \\
  
                              --node-monitor-grace-period=40s \\
  
                              --node-monitor-period=5s \\
  
                              --pod-eviction-timeout=5m0s"
  

  
EOF
  

  
cat > scheduler <<EOF
  
###
  
# kubernetes scheduler config
  

  
# default config should be adequate
  

  
# Add your own!
  
KUBE_SCHEDULER_ARGS="--leader-elect=true --address=0.0.0.0"
  
EOF
  

  

  
for IP in $zhuji ;do
  
apiconf
  
scp apiserver${IP##*.} root@$IP:/etc/kubernetes/apiserver
  
scp masterconfig root@$IP:/etc/kubernetes/config
  
scp controller-manager root@$IP:/etc/kubernetes/controller-manager
  
scp scheduler root@$IP:/etc/kubernetes/scheduler
  

  
ssh root@$IP systemctl daemon-reload
  
sleep 2
  
ssh root@$IP systemctl start kube-apiserver
  
sleep 2
  
ssh root@$IP systemctl start kube-controller-manager
  
sleep 2
  
ssh root@$IP systemctl start kube-scheduler
  
sleep 2
  
ssh root@$IP systemctl enable kube-apiserver
  
sleep 2
  
ssh root@$IP systemctl enable kube-controller-manager
  
sleep 2
  
ssh root@$IP systemctl enable kube-scheduler
  
done
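
# (Optional, not in the original post.) With all three masters started, the
# scheduler and controller-manager should report Healthy through the local
# insecure port; `kubectl get componentstatuses` was still available in 1.8.
ssh root@$host1 kubectl get componentstatuses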
  

  
ssh root@$host1 kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
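
# (Optional verification, not in the original script.) Confirm the binding that
# allows bootstrap tokens to request node certificates was actually created.
ssh root@$host1 kubectl get clusterrolebinding kubelet-bootstrap -o wide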

