Complete Binary Installation of a Highly Available Kubernetes Cluster

I. Server Environment

192.168.20.41 k8s-master

192.168.20.42 k8s-node1

192.168.20.43 k8s-node2

 

II. Configure Certificates on the Master Node

1. Prepare the cfssl certificate tools

On the master node:

[root@k8s-master ~]# more /etc/hosts

192.168.20.41 k8s-master

192.168.20.42 k8s-node1

192.168.20.43 k8s-node2

192.168.20.41 etcd-1

192.168.20.42 etcd-2

192.168.20.43 etcd-3

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

[root@k8s-master ~]# chmod +x cfssl_linux-amd64  cfssljson_linux-amd64  cfssl-certinfo_linux-amd64

[root@k8s-master ~]# mv cfssl_linux-amd64 /usr/local/bin/cfssl

[root@k8s-master ~]# mv cfssljson_linux-amd64 /usr/local/bin/cfssljson

[root@k8s-master ~]# mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
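To confirm the tools are installed and on PATH, a quick check (not in the original; output varies with the cfssl release you downloaded):

cfssl version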

 

2. Generate etcd certificates

2.1 Self-sign a certificate authority (CA)

Create a working directory:

[root@k8s-master ~]# mkdir -p ~/tls/{etcd,k8s}

[root@k8s-master ~]# cd tls/etcd/

[root@k8s-master etcd]# cat > ca-config.json << EOF

> {

>   "signing": {

>     "default": {

>       "expiry": "87600h"

>     },

>     "profiles": {

>       "www": {

>          "expiry": "87600h",

>          "usages": [

>             "signing",

>             "key encipherment",

>             "server auth",

>             "client auth"

>         ]

>       }

>     }

>   }

> }

> EOF

[root@k8s-master etcd]# cat > ca-csr.json << EOF

> {

>     "CN": "etcd CA",

>     "key": {

>         "algo": "rsa",

>         "size": 2048

>     },

>     "names": [

>         {

>             "C": "CN",

>             "L": "Beijing",

>             "ST": "Beijing"

>         }

>     ]

> }

> EOF

 

2.2 Generate the CA certificate:

[root@k8s-master etcd]# ls

ca-config.json  ca-csr.json

[root@k8s-master etcd]# cfssl gencert -initca ca-csr.json |cfssljson -bare ca -

2020/12/27 14:10:26 [INFO] generating a new CA key and certificate from CSR

2020/12/27 14:10:26 [INFO] generate received request

2020/12/27 14:10:26 [INFO] received CSR

2020/12/27 14:10:26 [INFO] generating key: rsa-2048

2020/12/27 14:10:26 [INFO] encoded CSR

2020/12/27 14:10:27 [INFO] signed certificate with serial number 695386223963361134699949235237569629999190885216

 

2.3 Issue the etcd HTTPS server certificate with the self-signed CA

[root@k8s-master etcd]# cat > server-csr.json << EOF

{

    "CN": "etcd",

    "hosts": [

    "192.168.20.41",

    "192.168.20.42",

    "192.168.20.43"

    ],

    "key": {

        "algo": "rsa",

        "size": 2048

    },

    "names": [

        {

            "C": "CN",

            "L": "BeiJing",

            "ST": "BeiJing"

        }

    ]

}

EOF

2.4 Generate the server certificate

[root@k8s-master etcd]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

2020/12/27 14:25:34 [INFO] generate received request

2020/12/27 14:25:34 [INFO] received CSR

2020/12/27 14:25:34 [INFO] generating key: rsa-2048

2020/12/27 14:25:34 [INFO] encoded CSR

2020/12/27 14:25:34 [INFO] signed certificate with serial number 647470747500960226606663959217424414433378131770

2020/12/27 14:25:34 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for

websites. For more information see the Baseline Requirements for the Issuance and Management

of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);

specifically, section 10.2.3 ("Information Requirements").
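As a quick sanity check (not in the original), cfssl-certinfo can show what was actually issued, including the SAN IPs listed in the request:

cfssl-certinfo -cert server.pem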

III. Deploy the etcd Cluster

1. Extract the files

[root@k8s-master ~]# mkdir /opt/etcd/{bin,cfg,ssl} -p

[root@k8s-master ~]# tar -zxvf etcd-v3.3.13-linux-amd64.tar.gz

Move the etcd binaries into place:

[root@k8s-master etcd-v3.3.13-linux-amd64]# ll

total 29776

drwxr-xr-x 10 mysql mysql     4096 May  3 2019 Documentation

-rwxr-xr-x  1 mysql mysql 16927136 May  3 2019 etcd

-rwxr-xr-x  1 mysql mysql 13498880 May  3 2019 etcdctl

-rw-r--r--  1 mysql mysql    38864 May  3 2019 README-etcdctl.md

-rw-r--r--  1 mysql mysql     7262 May  3 2019 README.md

-rw-r--r--  1 mysql mysql     7855 May  3 2019 READMEv2-etcdctl.md

 

[root@k8s-master etcd-v3.3.13-linux-amd64]# mv etcd /opt/etcd/bin/

[root@k8s-master etcd-v3.3.13-linux-amd64]# mv etcdctl /opt/etcd/bin/

 

2. Create the etcd configuration file

cat > /opt/etcd/cfg/etcd.conf << EOF

#[Member]

ETCD_NAME="etcd-1"

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

ETCD_LISTEN_PEER_URLS="https://192.168.20.41:2380"

ETCD_LISTEN_CLIENT_URLS="https://192.168.20.41:2379"

#[Clustering]

ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.20.41:2380"

ETCD_ADVERTISE_CLIENT_URLS="https://192.168.20.41:2379"

ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.20.41:2380,etcd-2=https://192.168.20.42:2380,etcd-3=https://192.168.20.43:2380"

ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"

ETCD_INITIAL_CLUSTER_STATE="new"

EOF

3. Manage etcd with systemd

cat > /usr/lib/systemd/system/etcd.service << EOF

[Unit]

Description=Etcd Server

After=network.target

After=network-online.target

Wants=network-online.target

[Service]

Type=notify

EnvironmentFile=/opt/etcd/cfg/etcd.conf

ExecStart=/opt/etcd/bin/etcd \

--cert-file=/opt/etcd/ssl/server.pem \

--key-file=/opt/etcd/ssl/server-key.pem \

--peer-cert-file=/opt/etcd/ssl/server.pem \

--peer-key-file=/opt/etcd/ssl/server-key.pem \

--trusted-ca-file=/opt/etcd/ssl/ca.pem \

--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \

--logger=zap

Restart=on-failure

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

EOF

[root@k8s-master etcd-v3.3.13-linux-amd64]# more /usr/lib/systemd/system/etcd.service

[Unit]

Description=Etcd Server

After=network.target

After=network-online.target

Wants=network-online.target

[Service]

Type=notify

EnvironmentFile=/opt/etcd/cfg/etcd.conf

ExecStart=/opt/etcd/bin/etcd --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --peer-cert-file=/opt/etcd/ssl/server.pem --peer-key-file=/opt/etcd/ssl/server-key.pem --trusted-ca-file=/opt/etcd/ssl/ca.pem --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem --logger=zap

Restart=on-failure

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

4. Copy the newly generated certificates into the etcd directory

[root@k8s-master ~]# cp /root/tls/etcd/ca*pem /opt/etcd/ssl/

[root@k8s-master ~]# cp /root/tls/etcd/server*pem /opt/etcd/ssl/

[root@k8s-master ~]# cd /opt/etcd/ssl/

[root@k8s-master ssl]# ll

5. Distribute the etcd files to the other two nodes (etcd-2, etcd-3)

[root@k8s-master ~]# scp -r /opt/etcd/ root@192.168.20.42:/opt/
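The original only shows the copy to etcd-2; the same directory also has to go to etcd-3, and the systemd unit needs to exist on both nodes (copy it, then adjust it per node as shown below). For example:

scp -r /opt/etcd/ root@192.168.20.43:/opt/

scp /usr/lib/systemd/system/etcd.service root@192.168.20.42:/usr/lib/systemd/system/

scp /usr/lib/systemd/system/etcd.service root@192.168.20.43:/usr/lib/systemd/system/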

6. On etcd-2 (the copied configuration is adjusted to use the local member name and IPs):



[root@k8s-node1 ~]# more /usr/lib/systemd/system/etcd.service

[Unit]

Description=Etcd Server

After=network.target

After=network-online.target

Wants=network-online.target

[Service]

Type=notify

EnvironmentFile=/opt/etcd/cfg/etcd.conf

ExecStart=/opt/etcd/bin/etcd --name=etcd-2 --data-dir=/var/lib/etcd/default.etcd --listen-peer-urls=https://192.168.20.42:2380 --listen-client-urls=https://192.168.20.42:2379 --advertise-client-urls=https://192.168.20.42:2379 --initial-advertise-peer-urls=https://192.168.20.42:2380 --initial-cluster=etcd-1=https://192.168.20.41:2380,etcd-2=https://192.168.20.42:2380,etcd-3=https://192.168.20.43:2380 --initial-cluster-token=etcd-cluster --initial-cluster-state=new --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --peer-cert-file=/opt/etcd/ssl/server.pem --peer-key-file=/opt/etcd/ssl/server-key.pem --trusted-ca-file=/opt/etcd/ssl/ca.pem --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem

Restart=on-failure

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

完整的二进制安装Kubernetes高可用集群

Do the same on etcd-3 as on etcd-2 (member name etcd-3, IPs 192.168.20.43).

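The original screenshots presumably covered starting the service; before checking cluster health, etcd has to be started and enabled on all three nodes (start them at roughly the same time, since the first member waits for its peers):

systemctl daemon-reload

systemctl start etcd

systemctl enable etcd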

7. Check the cluster status

[root@k8s-master ~]# ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.20.41:2379,https://192.168.20.42:2379,https://192.168.20.43:2379" endpoint health

https://192.168.20.41:2379 is healthy: successfully committed proposal: took = 8.927579ms

https://192.168.20.43:2379 is healthy: successfully committed proposal: took = 4.209997ms

https://192.168.20.42:2379 is healthy: successfully committed proposal: took = 4.947391ms
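Optionally (not in the original), the member roster and current leader can be inspected with the same certificate flags:

ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.20.41:2379,https://192.168.20.42:2379,https://192.168.20.43:2379" member list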


####################### etcd cluster installation complete #############################

IV. Install Docker (required on every node)

        yum -y install docker-ce
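The docker-ce package is only available once the Docker CE yum repository has been added; a typical CentOS setup (the repo URL is Docker's standard one), followed by enabling the service, looks like this:

yum -y install yum-utils

yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

yum -y install docker-ce

systemctl enable docker && systemctl start docker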


V. Deploy the Master Node

Generate the kube-apiserver certificates

1. Self-sign a certificate authority (CA)

[root@k8s-master k8s]# pwd

/root/tls/k8s

cat > ca-config.json << EOF

{

  "signing": {

    "default": {

      "expiry": "87600h"

    },

    "profiles": {

      "kubernetes": {

         "expiry": "87600h",

         "usages": [

            "signing",

            "key encipherment",

            "server auth",

            "client auth"

        ]

      }

    }

  }

}

EOF

cat > ca-csr.json << EOF

{

    "CN": "kubernetes",

    "key": {

        "algo": "rsa",

        "size": 2048

    },

    "names": [

        {

            "C": "CN",

            "L": "Beijing",

            "ST": "Beijing",

            "O": "k8s",

            "OU": "System"

        }

    ]

}

EOF

Generate the CA certificate:

[root@k8s-master k8s]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

[root@k8s-master k8s]# ls *pem

ca-key.pem  ca.pem

2. Issue the kube-apiserver HTTPS certificate with the self-signed CA

Create the certificate signing request file:

[root@k8s-master k8s]# pwd

/root/tls/k8s

cat > server-csr.json << EOF

{

    "CN": "kubernetes",

    "hosts": [

      "10.0.0.1",

      "127.0.0.1",

      "192.168.20.41",

      "192.168.20.42",

      "192.168.20.43",

      "kubernetes",

      "kubernetes.default",

      "kubernetes.default.svc",

      "kubernetes.default.svc.cluster",

      "kubernetes.default.svc.cluster.local"

    ],

    "key": {

        "algo": "rsa",

        "size": 2048

    },

    "names": [

        {

            "C": "CN",

            "L": "BeiJing",

            "ST": "BeiJing",

            "O": "k8s",

            "OU": "System"

        }

    ]

}

EOF

Generate the certificate:

[root@k8s-master k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

[root@k8s-master k8s]# ls server*pem

server-key.pem  server.pem


VI. Deploy kube-apiserver

[root@k8s-master ~]# mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}

[root@k8s-master ~]# tar -zxvf kubernetes-server-linux-amd64.tar.gz

[root@k8s-master bin]# pwd

/root/kubernetes/server/bin

[root@k8s-master bin]# cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin

[root@k8s-master bin]# cp kubectl /usr/bin/

1. Create the configuration file

cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF

KUBE_APISERVER_OPTS="--logtostderr=false \\

--v=2 \\

--log-dir=/opt/kubernetes/logs \\

--etcd-servers=https://192.168.20.41:2379,https://192.168.20.42:2379,https://192.168.20.43:2379 \\

--bind-address=192.168.20.41 \\

--secure-port=6443 \\

--advertise-address=192.168.20.41 \\

--allow-privileged=true \\

--service-cluster-ip-range=10.0.0.0/24 \\

--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\

--authorization-mode=RBAC,Node \\

--enable-bootstrap-token-auth=true \\

--token-auth-file=/opt/kubernetes/cfg/token.csv \\

--service-node-port-range=30000-32767 \\

--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\

--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\

--tls-cert-file=/opt/kubernetes/ssl/server.pem  \\

--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\

--client-ca-file=/opt/kubernetes/ssl/ca.pem \\

--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\

--etcd-cafile=/opt/etcd/ssl/ca.pem \\

--etcd-certfile=/opt/etcd/ssl/server.pem \\

--etcd-keyfile=/opt/etcd/ssl/server-key.pem \\

--audit-log-maxage=30 \\

--audit-log-maxbackup=3 \\

--audit-log-maxsize=100 \\

--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"

EOF

2. Copy the newly generated certificates

Copy them to the paths referenced in the configuration file:

[root@k8s-master ~]# cp tls/k8s/ca*pem tls/k8s/server*pem /opt/kubernetes/ssl/

 

3. Create the token file referenced in the configuration

[root@k8s-master ~]# cat > /opt/kubernetes/cfg/token.csv << EOF

> c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"

> EOF
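The token is an arbitrary 32-character hex string in the format token,user,uid,"group". If you would rather generate a fresh value instead of reusing the one above, a common way is the following (if you do, use the new value in bootstrap.kubeconfig later as well):

head -c 16 /dev/urandom | od -An -t x | tr -d ' '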

4. Manage kube-apiserver with systemd

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF

[Unit]

Description=Kubernetes API Server

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf

ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS

Restart=on-failure

[Install]

WantedBy=multi-user.target

EOF

5. Start the service and enable it at boot

[root@k8s-master ~]# systemctl daemon-reload

[root@k8s-master ~]# systemctl start kube-apiserver

[root@k8s-master ~]# systemctl enable kube-apiserver
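Before continuing, it is worth confirming that the API server is running and listening on 6443 (the screenshot in the original presumably showed this), for example:

systemctl status kube-apiserver

ss -lntp | grep 6443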


6. Authorize the kubelet-bootstrap user to request certificates

[root@k8s-master ~]# kubectl create clusterrolebinding kubelet-bootstrap \

> --clusterrole=system:node-bootstrapper \

> --user=kubelet-bootstrap

clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created

VII. Deploy kube-controller-manager

1. Create the configuration file

cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF

KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\

--v=2 \\

--log-dir=/opt/kubernetes/logs \\

--leader-elect=true \\

--master=127.0.0.1:8080 \\

--bind-address=127.0.0.1 \\

--allocate-node-cidrs=true \\

--cluster-cidr=10.244.0.0/16 \\

--service-cluster-ip-range=10.0.0.0/24 \\

--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\

--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \\

--root-ca-file=/opt/kubernetes/ssl/ca.pem \\

--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\

--experimental-cluster-signing-duration=87600h0m0s"

EOF

2. Manage kube-controller-manager with systemd

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF

[Unit]

Description=Kubernetes Controller Manager

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf

ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS

Restart=on-failure

[Install]

WantedBy=multi-user.target

EOF

3. Start the service and enable it at boot

[root@k8s-master ~]# systemctl daemon-reload

[root@k8s-master ~]# systemctl start kube-controller-manager

[root@k8s-master ~]# systemctl enable kube-controller-manager

VIII. Deploy kube-scheduler

1. Create the configuration file

cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF

KUBE_SCHEDULER_OPTS="--logtostderr=false \\

--v=2 \\

--log-dir=/opt/kubernetes/logs \\

--leader-elect \\

--master=127.0.0.1:8080 \\

--bind-address=127.0.0.1"

EOF

2. Manage kube-scheduler with systemd

cat > /usr/lib/systemd/system/kube-scheduler.service << EOF

[Unit]

Description=Kubernetes Scheduler

Documentation=https://github.com/kubernetes/kubernetes

[Service]

EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf

ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS

Restart=on-failure

[Install]

WantedBy=multi-user.target

EOF

3. Start the service and enable it at boot

[root@k8s-master ~]# systemctl daemon-reload

[root@k8s-master ~]# systemctl start kube-scheduler

[root@k8s-master ~]# systemctl enable kube-scheduler


4. Check the cluster status

[root@k8s-master ~]# kubectl get cs

NAME                 STATUS    MESSAGE             ERROR

scheduler            Healthy   ok

etcd-0               Healthy   {"health":"true"}

controller-manager   Healthy   ok

etcd-1               Healthy   {"health":"true"}

etcd-2               Healthy   {"health":"true"}
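If any component shows Unhealthy here, the corresponding systemd unit's log is the first place to look, e.g.:

journalctl -u kube-scheduler -f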

IX. Deploy the Worker Node

The following is performed on the master node, which also serves as a worker node.

1. Create the working directories and copy the binaries

[root@k8s-master ~]# mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}

Copy from the server package extracted on the master node:

[root@k8s-master ~]# cd kubernetes/server/bin/

[root@k8s-master bin]# cp kubelet kube-proxy /opt/kubernetes/bin


2. Deploy kubelet

1) Create the configuration file

cat > /opt/kubernetes/cfg/kubelet.conf << EOF

KUBELET_OPTS="--logtostderr=false \\

--v=2 \\

--log-dir=/opt/kubernetes/logs \\

--hostname-override=k8s-master \\

--network-plugin=cni \\

--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\

--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\

--config=/opt/kubernetes/cfg/kubelet-config.yml \\

--cert-dir=/opt/kubernetes/ssl"

EOF

2) Configure the kubelet parameter file

cat > /opt/kubernetes/cfg/kubelet-config.yml << EOF

kind: KubeletConfiguration

apiVersion: kubelet.config.k8s.io/v1beta1

address: 0.0.0.0

port: 10250

readOnlyPort: 10255

cgroupDriver: cgroupfs

clusterDNS:

- 10.0.0.2

clusterDomain: cluster.local

failSwapOn: false

authentication:

  anonymous:

    enabled: false

  webhook:

    cacheTTL: 2m0s

    enabled: true

  x509:

    clientCAFile: /opt/kubernetes/ssl/ca.pem

authorization:

  mode: Webhook

  webhook:

    cacheAuthorizedTTL: 5m0s

    cacheUnauthorizedTTL: 30s

evictionHard:

  imagefs.available: 15%

  memory.available: 100Mi

  nodefs.available: 10%

  nodefs.inodesFree: 5%

maxOpenFiles: 1000000

maxPods: 110

EOF
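One caveat not called out in the original: cgroupDriver above must match the cgroup driver Docker is actually using, otherwise kubelet will refuse to start. Check Docker's driver with:

docker info | grep -i cgroup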

3) Generate the bootstrap.kubeconfig file

KUBE_APISERVER="https://192.168.20.41:6443" # apiserver IP:PORT

TOKEN="c47ffb939f5ca36231d9e3121a252940" # 与token.csv里保持一致

 

# Generate the kubelet bootstrap kubeconfig

kubectl config set-cluster kubernetes \

  --certificate-authority=/opt/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=${KUBE_APISERVER} \

  --kubeconfig=bootstrap.kubeconfig

kubectl config set-credentials "kubelet-bootstrap" \

  --token=${TOKEN} \

  --kubeconfig=bootstrap.kubeconfig

kubectl config set-context default \

  --cluster=kubernetes \

  --user="kubelet-bootstrap" \

  --kubeconfig=bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig


Copy it to the configuration directory:

[root@k8s-master ~]# cp bootstrap.kubeconfig  /opt/kubernetes/cfg/

4) Manage kubelet with systemd

cat > /usr/lib/systemd/system/kubelet.service << EOF

[Unit]

Description=Kubernetes Kubelet

After=docker.service

[Service]

EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf

ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS

Restart=on-failure

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

EOF

5) Start the service and enable it at boot

[root@k8s-master ~]# systemctl daemon-reload

[root@k8s-master ~]# systemctl start kubelet

[root@k8s-master ~]# systemctl enable kubelet


6) Approve the kubelet certificate request and join the cluster

# List pending kubelet certificate requests

[root@k8s-master ~]# kubectl get csr

NAME           AGE   REQUESTOR           CONDITION

node-csr-RRToG-etiQOvWj1TcHU5JPwsKQgjsY9U7xMiOVIe2-o   68s   kubelet-bootstrap   Pending


# Approve the request

[root@k8s-master ~]# kubectl certificate approve node-csr-RRToG-etiQOvWj1TcHU5JPwsKQgjsY9U7xMiOVIe2-o


# List nodes

[root@k8s-master ~]# kubectl get node

NAME         STATUS     ROLES    AGE   VERSION

k8s-master   NotReady   <none>   6s    v1.14.2

The node shows NotReady because no CNI network plugin has been deployed yet; this is resolved in Section XI.


X. Deploy kube-proxy

1. Create the configuration file

cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF

KUBE_PROXY_OPTS="--logtostderr=false \\

--v=2 \\

--log-dir=/opt/kubernetes/logs \\

--config=/opt/kubernetes/cfg/kube-proxy-config.yml"

EOF

2. Configure the kube-proxy parameter file

cat > /opt/kubernetes/cfg/kube-proxy-config.yml << EOF

kind: KubeProxyConfiguration

apiVersion: kubeproxy.config.k8s.io/v1alpha1

bindAddress: 0.0.0.0

metricsBindAddress: 0.0.0.0:10249

clientConnection:

  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig

hostnameOverride: k8s-master

clusterCIDR: 10.244.0.0/16

EOF

3. Generate the kube-proxy.kubeconfig file

Generate the kube-proxy certificate:

# Switch to the working directory

[root@k8s-master ~]# cd tls/k8s/

# Create the certificate signing request file

cat > kube-proxy-csr.json << EOF

{

  "CN": "system:kube-proxy",

  "hosts": [],

  "key": {

    "algo": "rsa",

    "size": 2048

  },

  "names": [

    {

      "C": "CN",

      "L": "BeiJing",

      "ST": "BeiJing",

      "O": "k8s",

      "OU": "System"

    }

  ]

}

EOF

# Generate the certificate

[root@k8s-master k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

[root@k8s-master k8s]# ls kube-proxy*pem

kube-proxy-key.pem  kube-proxy.pem


4. Generate the kubeconfig file:

KUBE_APISERVER="https://192.168.20.41:6443"

kubectl config set-cluster kubernetes \

  --certificate-authority=/opt/kubernetes/ssl/ca.pem \

  --embed-certs=true \

  --server=${KUBE_APISERVER} \

  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \

  --client-certificate=./kube-proxy.pem \

  --client-key=./kube-proxy-key.pem \

  --embed-certs=true \

  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \

  --cluster=kubernetes \

  --user=kube-proxy \

  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

[root@k8s-master ~]# cp kube-proxy.kubeconfig /opt/kubernetes/cfg/

5. Manage kube-proxy with systemd

cat > /usr/lib/systemd/system/kube-proxy.service << EOF

[Unit]

Description=Kubernetes Proxy

After=network.target

[Service]

EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf

ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS

Restart=on-failure

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

EOF

6. Start the service and enable it at boot

[root@k8s-master ~]# systemctl daemon-reload

[root@k8s-master ~]# systemctl start kube-proxy

[root@k8s-master ~]# systemctl enable kube-proxy


XI. Deploy the CNI Network

[root@k8s-master ~]# mkdir -p /opt/cni/bin

[root@k8s-master ~]# tar -zxvf cni-plugins-linux-amd64-v0.8.5.tgz  -C /opt/cni/bin
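kube-flannel.yaml is assumed to already be in /root; it is normally downloaded from the flannel project (the URL below was the upstream manifest location at the time and may have moved), and its Network field should match the --cluster-cidr configured above (10.244.0.0/16):

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -O /root/kube-flannel.yaml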


[root@k8s-master ~]# kubectl apply -f /root/kube-flannel.yaml

podsecuritypolicy.policy/psp.flannel.unprivileged created

clusterrole.rbac.authorization.k8s.io/flannel created

clusterrolebinding.rbac.authorization.k8s.io/flannel created

serviceaccount/flannel created

configmap/kube-flannel-cfg created

daemonset.apps/kube-flannel-ds-amd64 created

[root@k8s-master ~]# kubectl get pods -n kube-system


[root@k8s-master ~]# kubectl get node

Once the kube-flannel pod is Running, the node status changes from NotReady to Ready.

1. Authorize apiserver to access kubelet (needed for kubectl logs/exec)

cat > apiserver-to-kubelet-rbac.yaml << EOF

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRole

metadata:

  annotations:

    rbac.authorization.kubernetes.io/autoupdate: "true"

  labels:

    kubernetes.io/bootstrapping: rbac-defaults

  name: system:kube-apiserver-to-kubelet

rules:

  - apiGroups:

      - ""

    resources:

      - nodes/proxy

      - nodes/stats

      - nodes/log

      - nodes/spec

      - nodes/metrics

      - pods/log

    verbs:

      - "*"

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  name: system:kube-apiserver

  namespace: ""

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: system:kube-apiserver-to-kubelet

subjects:

  - apiGroup: rbac.authorization.k8s.io

    kind: User

    name: kubernetes

EOF

[root@k8s-master ~]# kubectl apply -f apiserver-to-kubelet-rbac.yaml

clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created

clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created


XII. Add New Worker Nodes

1. Copy the deployed node files to the new worker nodes

Copy the node files deployed on the master to the new nodes node1 (192.168.20.42) and node2 (192.168.20.43):

[root@k8s-master ~]# scp -r /opt/kubernetes root@192.168.20.42:/opt/

[root@k8s-master ~]# scp -r /opt/kubernetes root@192.168.20.43:/opt/ 

[root@k8s-master ~]# scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@192.168.20.42:/usr/lib/systemd/system

[root@k8s-master ~]# scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@192.168.20.43:/usr/lib/systemd/system

The kubelet.kubeconfig and the kubelet certificates under /opt/kubernetes/ssl/ were generated for the master during TLS bootstrapping; they are node-specific and must be deleted from the copies so that each new node requests its own:

[root@k8s-node1 ~]# rm -rf /opt/kubernetes/cfg/kubelet.kubeconfig

[root@k8s-node1 ~]# rm -f /opt/kubernetes/ssl/kubelet*

[root@k8s-node2 ~]# rm -f /opt/kubernetes/ssl/kubelet*

[root@k8s-node2 ~]# rm /opt/kubernetes/cfg/kubelet.kubeconfig -rf

2. Change the hostname in the configuration files

In kubelet.conf set --hostname-override, and in kube-proxy-config.yml set hostnameOverride, to k8s-node1 on node1 and k8s-node2 on node2:

[root@k8s-node1 ~]# vim /opt/kubernetes/cfg/kubelet.conf

[root@k8s-node1 ~]# vim /opt/kubernetes/cfg/kube-proxy-config.yml

[root@k8s-node2 ~]# vim /opt/kubernetes/cfg/kubelet.conf

[root@k8s-node2 ~]# vim /opt/kubernetes/cfg/kube-proxy-config.yml


3. Start the services and enable them at boot (repeat the same commands on k8s-node2)

[root@k8s-node1 ~]# systemctl daemon-reload

[root@k8s-node1 ~]# systemctl start kubelet

[root@k8s-node1 ~]# systemctl enable kubelet

[root@k8s-node1 ~]# systemctl start kube-proxy

[root@k8s-node1 ~]# systemctl enable kube-proxy

4. On the master, approve the new nodes' kubelet certificate requests

[root@k8s-master ssl]# kubectl get csr

Approve each pending request with kubectl certificate approve <csr-name>, as was done for the master earlier.

5. Check the node status

Run kubectl get node on the master again; k8s-node1 and k8s-node2 should now appear alongside k8s-master.

