
Kubernetes 1.21.2 High-Availability Binary Deployment

I spent two weeks of spare time studying how to deploy K8s, working from the simplest Rancher install, to kubeadm, and finally to a binary deployment. As an ops novice I had only the official docs and online articles to go by, and hit countless pitfalls. Going step by step through a full binary deployment is what finally gave me a solid understanding of the k8s components, the certificates, and how everything communicates. This installation method is more tedious, but it makes troubleshooting and recovery the fastest. These are my installation notes.

1. Environment Preparation

1.1 Host Planning

IP Address  Hostname  Specs  Role  Software
192.168.128.200 master01.th-k8s 2C 4G 50G master kube-apiserver, kube-controller-manager, kube-scheduler, etcd, haproxy, keepalived, nfs-client
192.168.128.201 master02.th-k8s 2C 4G 50G master kube-apiserver, kube-controller-manager, kube-scheduler, etcd, haproxy, keepalived, nfs-client
192.168.128.202 master03.th-k8s 2C 4G 50G master kube-apiserver, kube-controller-manager, kube-scheduler, etcd, haproxy, keepalived, nfs-client
192.168.128.203 node01.th-k8s 4C 16G 50G node kubelet, kube-proxy, nfs-client
192.168.128.204 node02.th-k8s 4C 16G 50G node kubelet, kube-proxy, nfs-client
192.168.128.205 node03.th-k8s 4C 16G 50G node kubelet, kube-proxy, nfs-client
192.168.128.206 data.th-k8s 2C 2G 100G data-volumes nfs server
192.168.128.208 / / VIP virtual IP served by the HAProxy + keepalived load balancer

1.2 Software Versions

Watch the versions: configuration details can change between releases; consult the official documentation for your exact version.

Software  Version
centos  7.9.2009, kernel 5.12.9-1.el7.elrepo.x86_64
kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy  v1.21.2
etcd  v3.5.0
calico  v3.19.1
coredns  1.8.4
docker  19.03.15
haproxy  1.5.18
keepalived  1.3.5

1.3 Network Allocation

Network  CIDR
Pod network  172.168.0.0/12
Service network  10.96.0.0/16

The Pod CIDR must stay consistent wherever it appears below: --cluster-cidr on kube-controller-manager and clusterCIDR in the kube-proxy config.

2. Building the Cluster

2.1 Preparation on All Machines

2.1.1 Set the hostname

Hostnames follow the table in section 1.1; on each machine set its own name, e.g. on master01:
hostnamectl --static set-hostname master01.th-k8s

2.1.2 Configure the hosts file

cat >> /etc/hosts << EOF
192.168.128.200 master01.th-k8s
192.168.128.201 master02.th-k8s
192.168.128.202 master03.th-k8s
192.168.128.203 node01.th-k8s
192.168.128.204 node02.th-k8s
192.168.128.205 node03.th-k8s
192.168.128.206 data.th-k8s
EOF

2.1.3 Disable the firewall and SELinux

systemctl disable --now firewalld
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# verify
sestatus

2.1.4 Disable swap

swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
echo "vm.swappiness = 0" >> /etc/sysctl.conf 
sysctl -p
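
Quick check that swap is fully off (both should report no swap in use):

free -h | grep -i swap
cat /proc/swaps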

2.1.5 Time synchronization

chronyd is used here; installation and configuration are omitted. Verify with:

timedatectl

2.1.6 System configuration

# tune resource limits
ulimit -SHn 65535

cat <<EOF >> /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

2.1.7 Load the IPVS modules

yum install ipvsadm ipset sysstat conntrack libseccomp -y 

# Configure the ipvs modules on all nodes. On kernel 4.19+ nf_conntrack_ipv4 was renamed to nf_conntrack; below 4.19, use nf_conntrack_ipv4:
 
modprobe -- ip_vs 
modprobe -- ip_vs_rr 
modprobe -- ip_vs_wrr 
modprobe -- ip_vs_sh 
modprobe -- nf_conntrack 

 
# Create /etc/modules-load.d/ipvs.conf with the following content:
cat >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

# load the modules at boot
systemctl enable --now systemd-modules-load.service

2.1.8 Kernel tuning for K8s

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system

# After tuning the kernel on all nodes, reboot to confirm the modules still load at boot
reboot

# after the reboot, verify:
lsmod | grep --color=auto -e ip_vs -e nf_conntrack

2.1.9 Install other tools (optional)

yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git lrzsz -y

2.2 Master Node Installation

2.2.1 Configure passwordless SSH

# Run on master01; the SSH port on these hosts has been changed to 4956
cd /root
ssh-keygen -t rsa
for i in master02.th-k8s master03.th-k8s node01.th-k8s node02.th-k8s node03.th-k8s;do ssh-copy-id -i /root/.ssh/id_rsa.pub -p 4956 $i;done
# test
ssh -p 4956 master03.th-k8s
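
Optionally, record the non-standard port in the SSH client config so plain ssh/scp to these hosts picks it up automatically (a convenience sketch):

cat >> /root/.ssh/config << EOF
Host *.th-k8s
    Port 4956
EOF
chmod 600 /root/.ssh/config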

2.2.2 High availability with haproxy and keepalived

2.2.2.1 Install

yum install keepalived haproxy -y

2.2.2.2 Configure haproxy

cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
 maxconn 2000
 ulimit-n 16384
 log 127.0.0.1 local0 err
 stats timeout 30s

defaults
 log global
 mode http
 option httplog
 timeout connect 5000
 timeout client 50000
 timeout server 50000
 timeout http-request 15s
 timeout http-keep-alive 15s

frontend monitor-in
 bind *:33305
 mode http
 option httplog
 monitor-uri /monitor

frontend k8s-master
 bind 0.0.0.0:16443
 bind 127.0.0.1:16443
 mode tcp
 option tcplog
 tcp-request inspect-delay 5s
 default_backend k8s-master

backend k8s-master
 mode tcp
 option tcplog
 option tcp-check
 balance roundrobin
 default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
 server  master01.th-k8s  192.168.128.200:6443 check
 server  master02.th-k8s  192.168.128.201:6443 check
 server  master03.th-k8s  192.168.128.202:6443 check
EOF
2.2.2.3 Configure keepalived

Each master's configuration is different; take care to apply the right one on each node.

# master01 configuration:
cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL
   script_user root
   enable_script_security
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 5
   weight -5
   fall 2
   rise 1
}
vrrp_instance VI_1 {
   state MASTER
   interface ens160
   mcast_src_ip 192.168.128.200
   virtual_router_id 51
   priority 100
   advert_int 2
   authentication {
       auth_type PASS
       auth_pass K8SHA_KA_AUTH
   }
   virtual_ipaddress {
       192.168.128.208
   }
   track_script {
      chk_apiserver
   }
}
EOF

# master02 configuration:
cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL
   script_user root
   enable_script_security
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 5
   weight -5
   fall 2
   rise 1
}
vrrp_instance VI_1 {
   state BACKUP
   interface ens160
   mcast_src_ip 192.168.128.201
   virtual_router_id 51
   priority 99
   advert_int 2
   authentication {
       auth_type PASS
       auth_pass K8SHA_KA_AUTH
   }
   virtual_ipaddress {
       192.168.128.208
   }
   track_script {
      chk_apiserver
   }
}
EOF

# master03 configuration:
cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL
   script_user root
   enable_script_security
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 5
   weight -5
   fall 2
   rise 1
}
vrrp_instance VI_1 {
   state BACKUP
   interface ens160
   mcast_src_ip 192.168.128.202
   virtual_router_id 51
   priority 98
   advert_int 2
   authentication {
       auth_type PASS
       auth_pass K8SHA_KA_AUTH
   }
   virtual_ipaddress {
       192.168.128.208
   }
   track_script {
      chk_apiserver
   }
}
EOF
2.2.2.4 Health check script

cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
err=0
for k in $(seq 1 3)
do
   check_code=$(pgrep haproxy)
   if [[ $check_code == "" ]]; then
       err=$(expr $err + 1)
       sleep 1
       continue
   else
       err=0
       break
   fi
done

if [[ $err != "0" ]]; then
   echo "systemctl stop keepalived"
   /usr/bin/systemctl stop keepalived
   exit 1
else
   exit 0
fi
EOF

chmod u+x /etc/keepalived/check_apiserver.sh
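
The haproxy configuration and this health-check script are identical on all three masters; if you created them only on master01, copy them out (a sketch, reusing the custom SSH port):

for i in master02.th-k8s master03.th-k8s;do scp -P 4956 /etc/haproxy/haproxy.cfg $i:/etc/haproxy/;done
for i in master02.th-k8s master03.th-k8s;do scp -P 4956 /etc/keepalived/check_apiserver.sh $i:/etc/keepalived/;done
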
2.2.2.5 Start the services

systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
2.2.2.6 Check the status

# on master01 you should see the VIP
ip addr

# test from every node
ping 192.168.128.208 -c 4
telnet  192.168.128.208 16443
systemctl status keepalived haproxy 

If the VIP becomes unreachable by ping after a while:

# First clear the ARP cache on the master to move the VIP back to it; once the VIP pings normally, clear the ARP cache on the slaves
arp -n|awk '/^[1-9]/{system("arp -d "$1)}'

2.2.3 Build the etcd cluster

2.2.3.1 Create a working directory

# create the working directory on master01
mkdir -p /data/k8s-work

2.2.3.2 Generate certificates with cfssl

Install the cfssl tools

cd /data/k8s-work
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

Create the CA CSR file

cat > ca-csr.json <<"EOF"
{
  "CN": "kubernetes",
  "key": {
      "algo": "rsa",
      "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "shiyan",
      "O": "k8s",
      "OU": "system"
    }
  ],
  "ca": {
          "expiry": "87600h"
  }
}
EOF

Generate the CA certificate

cfssl gencert -initca ca-csr.json | cfssljson -bare ca

Configure the CA signing policy

cat > ca-config.json <<"EOF"
{
  "signing": {
      "default": {
          "expiry": "87600h"
        },
      "profiles": {
          "kubernetes": {
              "usages": [
                  "signing",
                  "key encipherment",
                  "server auth",
                  "client auth"
              ],
              "expiry": "87600h"
          }
      }
  }
}
EOF

Create the etcd CSR file

cat > etcd-csr.json <<"EOF"
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.128.200",
    "192.168.128.201",
    "192.168.128.202"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "Hubei",
    "L": "shiyan",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF

Generate the certificates

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson  -bare etcd

ls etcd*.pem
# etcd-key.pem  etcd.pem
2.2.3.3 Deploy the etcd cluster

Download and distribute the etcd package

wget https://github.com/etcd-io/etcd/releases/download/v3.5.0/etcd-v3.5.0-linux-amd64.tar.gz
tar -xvf etcd-v3.5.0-linux-amd64.tar.gz
cp -p etcd-v3.5.0-linux-amd64/etcd* /usr/local/bin/
scp -P 4956 etcd-v3.5.0-linux-amd64/etcd* master02.th-k8s:/usr/local/bin/
scp -P 4956 etcd-v3.5.0-linux-amd64/etcd* master03.th-k8s:/usr/local/bin/

Create the configuration file

cat >  etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.128.200:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.128.200:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.128.200:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.128.200:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.128.200:2380,etcd2=https://192.168.128.201:2380,etcd3=https://192.168.128.202:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

ETCD_NAME: node name, unique within the cluster
ETCD_DATA_DIR: data directory
ETCD_LISTEN_PEER_URLS: listen address for cluster (peer) traffic
ETCD_LISTEN_CLIENT_URLS: listen address for client traffic
ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster
ETCD_ADVERTISE_CLIENT_URLS: client address advertised to the cluster
ETCD_INITIAL_CLUSTER: addresses of all cluster members
ETCD_INITIAL_CLUSTER_TOKEN: cluster token
ETCD_INITIAL_CLUSTER_STATE: state when joining; new for a new cluster, existing to join an existing one

Create the systemd service

cat > etcd.service <<"EOF"
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Create the etcd directories on each master

mkdir -p /etc/etcd
mkdir -p /etc/etcd/ssl
mkdir -p /var/lib/etcd/default.etcd

Sync the files to the other masters

cp ca*.pem /etc/etcd/ssl/
cp etcd*.pem /etc/etcd/ssl/
cp etcd.conf /etc/etcd/
cp etcd.service /usr/lib/systemd/system/
for i in master02.th-k8s master03.th-k8s;do scp -P 4956 etcd.conf $i:/etc/etcd/;done
for i in master02.th-k8s master03.th-k8s;do scp -P 4956 etcd*.pem ca*.pem $i:/etc/etcd/ssl/;done
for i in master02.th-k8s master03.th-k8s;do scp -P 4956 etcd.service $i:/usr/lib/systemd/system/;done

On master02 and master03, change the etcd name and IPs in the configuration file; see the sketch below.

vim /etc/etcd/etcd.conf
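
For example, master02 differs only in the fields below (master03 uses etcd3 and 192.168.128.202); ETCD_INITIAL_CLUSTER, the token, and the state stay identical on every member:

# master02 values (illustrative)
ETCD_NAME="etcd2"
ETCD_LISTEN_PEER_URLS="https://192.168.128.201:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.128.201:2379,http://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.128.201:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.128.201:2379"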

Start the etcd cluster

systemctl daemon-reload
systemctl enable --now etcd.service
systemctl status etcd

Check cluster health

ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.128.200:2379,https://192.168.128.201:2379,https://192.168.128.202:2379 endpoint health
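
The same flags also work with the endpoint status subcommand, which additionally shows which member is currently the leader:

ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.128.200:2379,https://192.168.128.201:2379,https://192.168.128.202:2379 endpoint status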

2.2.4 Deploying Kubernetes

2.2.4.1 Download and distribute the packages

wget https://dl.k8s.io/v1.21.2/kubernetes-server-linux-amd64.tar.gz
tar -xvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
scp -P 4956  kube-apiserver kube-controller-manager kube-scheduler kubectl master02.th-k8s:/usr/local/bin/
scp -P 4956  kube-apiserver kube-controller-manager kube-scheduler kubectl master03.th-k8s:/usr/local/bin/
for i in node01.th-k8s node02.th-k8s node03.th-k8s;do scp -P 4956 kubelet kube-proxy $i:/usr/local/bin/;done
2.2.4.2 Create the working directories on all nodes

mkdir -p /etc/kubernetes/        
mkdir -p /etc/kubernetes/ssl     
mkdir -p /var/log/kubernetes        
2.2.4.3 Deploy kube-apiserver

Create the apiserver CSR file

cat > kube-apiserver-csr.json << "EOF"
{
"CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.128.200",
    "192.168.128.201",
    "192.168.128.202",
    "192.168.128.203",
    "192.168.128.204",
    "192.168.128.205",
    "192.168.128.206",
    "192.168.128.207",
    "192.168.128.208",
    "192.168.128.209",
    "192.168.128.210",
    "192.168.128.211",
    "192.168.128.212",
    "10.96.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "shiyan",
      "O": "k8s",
      "OU": "system"
    }
  ]
}
EOF

If the hosts field is non-empty, it must list every IP or domain name authorized to use the certificate. Since this certificate is used cluster-wide, fill in the IPs of all nodes, and add a few spare IPs to make later scaling easier. Also include the first IP of the service network (the first address of the service-cluster-ip-range passed to kube-apiserver, here 10.96.0.1).

Generate the certificate and the token file (token.csv has the format token,user,uid,group; the kubelet bootstrap flow uses it later)

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver

cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

Create the configuration file

cat > kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --bind-address=192.168.128.200 \
  --secure-port=6443 \
  --advertise-address=192.168.128.200 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all=true \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.96.0.0/16 \
  --token-auth-file=/etc/kubernetes/token.csv \
  --service-node-port-range=30000-50000 \
  --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \
  --service-account-issuer=api \
  --etcd-cafile=/etc/etcd/ssl/ca.pem \
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.128.200:2379,https://192.168.128.201:2379,https://192.168.128.202:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=4"
EOF

kube-apiserver command-line reference: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/

Create the apiserver systemd service file

cat > kube-apiserver.service << "EOF"
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Sync the files to the other masters

cp ca*.pem /etc/kubernetes/ssl/
cp kube-apiserver*.pem /etc/kubernetes/ssl/
cp token.csv /etc/kubernetes/
cp kube-apiserver.conf /etc/kubernetes/ 
cp kube-apiserver.service /usr/lib/systemd/system/
scp -P 4956  token.csv master02.th-k8s:/etc/kubernetes/
scp -P 4956  token.csv master03.th-k8s:/etc/kubernetes/
scp -P 4956  kube-apiserver*.pem master02.th-k8s:/etc/kubernetes/ssl/
scp -P 4956  kube-apiserver*.pem master03.th-k8s:/etc/kubernetes/ssl/
scp -P 4956  ca*.pem master02.th-k8s:/etc/kubernetes/ssl/
scp -P 4956  ca*.pem master03.th-k8s:/etc/kubernetes/ssl/
scp -P 4956  kube-apiserver.conf master02.th-k8s:/etc/kubernetes/
scp -P 4956  kube-apiserver.conf master03.th-k8s:/etc/kubernetes/
scp -P 4956  kube-apiserver.service master02.th-k8s:/usr/lib/systemd/system/
scp -P 4956  kube-apiserver.service master03.th-k8s:/usr/lib/systemd/system/

On master02 and master03, change the IP addresses in the config file to the local machine's actual IP; see the note below.

vim /etc/kubernetes/kube-apiserver.conf
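
Only the local-address flags change; --etcd-servers and everything else stays identical. On master02, for example (master03 uses 192.168.128.202):

  --bind-address=192.168.128.201 \
  --advertise-address=192.168.128.201 \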

Start the service

systemctl daemon-reload
systemctl enable --now kube-apiserver

systemctl status kube-apiserver
# test
curl --insecure https://192.168.128.200:6443/
curl --insecure https://192.168.128.201:6443/
curl --insecure https://192.168.128.202:6443/
curl --insecure https://192.168.128.208:16443/
2.2.4.4 Deploy kubectl

Create the CSR file

cat > admin-csr.json << "EOF"
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "shiyan",
      "O": "system:masters",             
      "OU": "system"
    }
  ]
}
EOF

Notes:
kube-apiserver later uses RBAC to authorize client requests (from kubelet, kube-proxy, Pods, and so on);
kube-apiserver predefines some RoleBindings for RBAC, e.g. cluster-admin binds the Group system:masters to the Role cluster-admin, which grants permission to call every kube-apiserver API;
O sets the certificate's Group to system:masters; when a client presents this certificate to kube-apiserver, authentication succeeds because the certificate is signed by the CA, and since the group is the pre-authorized system:masters, it is granted access to all APIs.
Note:
This admin certificate is later used to generate the administrator's kubeconfig. RBAC is the recommended way to control roles and permissions in kubernetes; kubernetes treats the certificate's CN field as the User and the O field as the Group.
"O": "system:masters" must be exactly system:masters, otherwise the later kubectl create clusterrolebinding will fail.

Generate the certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
cp admin*.pem /etc/kubernetes/ssl/

Configure the kubeconfig

kube.config is kubectl's configuration file; it contains everything needed to reach the apiserver: the apiserver address, the CA certificate, and the client's own certificate.

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.128.208:16443 --kubeconfig=kube.config

kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config

kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config

kubectl config use-context kubernetes --kubeconfig=kube.config

mkdir ~/.kube
cp kube.config ~/.kube/config
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=~/.kube/config

Check cluster status

export KUBECONFIG=$HOME/.kube/config

kubectl cluster-info
kubectl get componentstatuses
kubectl get all --all-namespaces

Sync the kubectl config to the other masters

for i in master02.th-k8s master03.th-k8s;do ssh -p 4956 $i "mkdir -p /root/.kube";done
scp -P 4956 /root/.kube/config master02.th-k8s:/root/.kube/
scp -P 4956 /root/.kube/config master03.th-k8s:/root/.kube/

Configure kubectl command completion

yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
kubectl completion bash > ~/.kube/completion.bash.inc
echo "source '$HOME/.kube/completion.bash.inc'" >> $HOME/.bash_profile
source $HOME/.bash_profile
2.2.4.5 Deploy kube-controller-manager

Create the CSR file

cat > kube-controller-manager-csr.json << "EOF"
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.128.200",
      "192.168.128.201",
      "192.168.128.202"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "Hubei",
        "L": "shiyan",
        "O": "system:kube-controller-manager",
        "OU": "system"
      }
    ]
}
EOF

The hosts list contains the IPs of all kube-controller-manager nodes.
CN is system:kube-controller-manager and O is system:kube-controller-manager; the built-in ClusterRoleBinding system:kube-controller-manager grants kube-controller-manager the permissions it needs.

Generate the certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

ls kube-controller-manager*.pem

Create kube-controller-manager.kubeconfig

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.128.208:16443 --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

Create kube-controller-manager.conf

cat > kube-controller-manager.conf << "EOF"
KUBE_CONTROLLER_MANAGER_OPTS="--port=10252 \
  --secure-port=10257 \
  --bind-address=127.0.0.1 \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --service-cluster-ip-range=10.96.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --allocate-node-cidrs=true \
  --cluster-cidr=172.168.0.0/12 \
  --experimental-cluster-signing-duration=87600h \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --leader-elect=true \
  --feature-gates=RotateKubeletServerCertificate=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --horizontal-pod-autoscaler-use-rest-clients=true \
  --horizontal-pod-autoscaler-sync-period=10s \
  --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --use-service-account-credentials=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2"
EOF

kube-controller-manager command-line reference: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/

Create the systemd service

cat > kube-controller-manager.service << "EOF"
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Sync the files to the other masters

cp kube-controller-manager*.pem /etc/kubernetes/ssl/
cp kube-controller-manager.kubeconfig /etc/kubernetes/
cp kube-controller-manager.conf /etc/kubernetes/
cp kube-controller-manager.service /usr/lib/systemd/system/
scp -P 4956 kube-controller-manager*.pem master02.th-k8s:/etc/kubernetes/ssl/
scp -P 4956 kube-controller-manager*.pem master03.th-k8s:/etc/kubernetes/ssl/
scp -P 4956 kube-controller-manager.kubeconfig kube-controller-manager.conf master02.th-k8s:/etc/kubernetes/
scp -P 4956 kube-controller-manager.kubeconfig kube-controller-manager.conf master03.th-k8s:/etc/kubernetes/
scp -P 4956 kube-controller-manager.service master02.th-k8s:/usr/lib/systemd/system/
scp -P 4956 kube-controller-manager.service master03.th-k8s:/usr/lib/systemd/system/

# inspect the certificate
openssl x509 -in /etc/kubernetes/ssl/kube-controller-manager.pem -noout -text

Start the service

systemctl daemon-reload 
systemctl enable --now kube-controller-manager
systemctl status kube-controller-manager
2.2.4.6 Deploy kube-scheduler

Create the CSR file

cat > kube-scheduler-csr.json << "EOF"
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "192.168.128.200",
      "192.168.128.201",
      "192.168.128.202"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "Hubei",
        "L": "shiyan",
        "O": "system:kube-scheduler",
        "OU": "system"
      }
    ]
}
EOF

Generate the certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

ls kube-scheduler*.pem

Create kube-scheduler.kubeconfig

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.128.208:16443 --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

Create the configuration file

cat > kube-scheduler.conf << "EOF"
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"
EOF

kube-scheduler command-line reference: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/

Create the systemd service

cat > kube-scheduler.service << "EOF"
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Sync the files to the other masters

cp kube-scheduler*.pem /etc/kubernetes/ssl/
cp kube-scheduler.kubeconfig /etc/kubernetes/
cp kube-scheduler.conf /etc/kubernetes/
cp kube-scheduler.service /usr/lib/systemd/system/
scp -P 4956 kube-scheduler*.pem master02.th-k8s:/etc/kubernetes/ssl/
scp -P 4956 kube-scheduler*.pem master03.th-k8s:/etc/kubernetes/ssl/
scp -P 4956 kube-scheduler.kubeconfig kube-scheduler.conf master02.th-k8s:/etc/kubernetes/
scp -P 4956 kube-scheduler.kubeconfig kube-scheduler.conf master03.th-k8s:/etc/kubernetes/
scp -P 4956 kube-scheduler.service master02.th-k8s:/usr/lib/systemd/system/
scp -P 4956 kube-scheduler.service master03.th-k8s:/usr/lib/systemd/system/

Start the service

systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl status kube-scheduler

2.3 Worker Node Installation

2.3.1 Install and configure docker

sudo yum remove docker-ce docker-ce-cli  -y
curl https://releases.rancher.com/install-docker/19.03.sh | sh
systemctl enable --now docker

cat <<EOF | sudo tee /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
   "registry-mirrors": ["https://yourid.mirror.aliyuncs.com"]
}
EOF

systemctl restart docker
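
Confirm that docker now reports the systemd cgroup driver; it must match the cgroupDriver set in the kubelet configuration later:

docker info | grep -i "cgroup driver"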

2.3.2 Deploying Kubernetes

2.3.2.1 Deploy kubelet

Run the following on master01.
Create kubelet-bootstrap.kubeconfig

BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.128.208:16443 --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig

Create the configuration file

cat > kubelet.json << "EOF"
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "192.168.128.200",
  "port": 10250,
  "readOnlyPort": 10255,
  "cgroupDriver": "systemd",                    
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.96.0.2"]
}
EOF

The clusterDNS value here is reused later when deploying coredns. Note also that the address field carries master01's IP because the file is generated there; it must be changed to each node's own IP after the sync step below.

Create the systemd service

cat > kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet.json \
  --network-plugin=cni \
  --rotate-certificates \
  --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

kubelet command-line reference: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/

Sync the files to the worker nodes

cp kubelet-bootstrap.kubeconfig /etc/kubernetes/
cp kubelet.json /etc/kubernetes/
cp kubelet.service /usr/lib/systemd/system/

for i in  node01.th-k8s node02.th-k8s node03.th-k8s;do scp -P 4956 kubelet-bootstrap.kubeconfig kubelet.json $i:/etc/kubernetes/;done

for i in  node01.th-k8s node02.th-k8s node03.th-k8s;do scp -P 4956 ca.pem $i:/etc/kubernetes/ssl/;done

for i in node01.th-k8s node02.th-k8s node03.th-k8s;do scp -P 4956 kubelet.service $i:/usr/lib/systemd/system/;done
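
As noted above, set address in /etc/kubernetes/kubelet.json to each node's own IP before starting kubelet. For example on node01 (use 192.168.128.204 on node02 and 192.168.128.205 on node03); the sed is safe because master01's IP appears only in that field:

sed -i 's/192.168.128.200/192.168.128.203/' /etc/kubernetes/kubelet.json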

Run on each worker node

mkdir -p /var/lib/kubelet
mkdir -p /var/log/kubernetes
systemctl daemon-reload
systemctl enable --now kubelet

systemctl status kubelet

Once the kubelet service is up on the nodes, approve the pending bootstrap CSRs on a master:
kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve
Check that the nodes have joined; they will stay NotReady until the CNI plugin is deployed in section 2.4:
kubectl get nodes

2.3.2.2 Deploy kube-proxy

Create the CSR file

cat > kube-proxy-csr.json << "EOF"
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "k8s",
      "OU": "system"
    }
  ]
}
EOF

Generate the certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
ls kube-proxy*.pem

Create the kubeconfig file

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.128.208:16443 --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Create the kube-proxy configuration file

cat > kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.128.203
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 172.168.0.0/12
healthzBindAddress: 192.168.128.203:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.128.203:10249
mode: "ipvs"
EOF

kube-proxy command-line reference: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/

Create the systemd service

cat >  kube-proxy.service << "EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Sync the files to each node

cp kube-proxy*.pem /etc/kubernetes/ssl/
cp kube-proxy.kubeconfig kube-proxy.yaml /etc/kubernetes/
cp kube-proxy.service /usr/lib/systemd/system/
for i in node01.th-k8s node02.th-k8s node03.th-k8s;do scp -P 4956 kube-proxy.kubeconfig kube-proxy.yaml $i:/etc/kubernetes/;done
for i in node01.th-k8s node02.th-k8s node03.th-k8s;do scp -P 4956 kube-proxy.service $i:/usr/lib/systemd/system/;done

On each node, change the addresses in kube-proxy.yaml (bindAddress, healthzBindAddress, metricsBindAddress) to that node's actual IP, as sketched below.

vim /etc/kubernetes/kube-proxy.yaml
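
For example on node02 (node03 substitutes 192.168.128.205); a global sed is safe here because 192.168.128.203 appears only in those three fields:

sed -i 's/192.168.128.203/192.168.128.204/g' /etc/kubernetes/kube-proxy.yaml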

Start the service

mkdir -p /var/lib/kube-proxy
systemctl daemon-reload
systemctl enable --now kube-proxy

systemctl status kube-proxy

2.4 Deploying the Network Components

2.4.1 Install calico

wget https://docs.projectcalico.org/v3.19/manifests/calico.yaml
kubectl apply -f calico.yaml 

Check the status: the calico pods should reach Running and every node should become Ready

kubectl get pods -A
kubectl get nodes

2.4.2 Deploy coredns

cat >  coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local  in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
             podAffinityTerm:
               labelSelector:
                 matchExpressions:
                   - key: k8s-app
                     operator: In
                     values: ["kube-dns"]
               topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.8.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
 
EOF

The Service clusterIP is 10.96.0.2, matching the clusterDNS value in the kubelet configuration file.

kubectl apply -f coredns.yaml
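
To confirm cluster DNS works, check the coredns pods and resolve the kubernetes service from a throwaway pod (a quick sanity check; busybox:1.28 is used because nslookup is broken in some newer busybox images):

kubectl get pods -n kube-system | grep coredns
kubectl run dns-test -it --rm --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default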

2.4.3 Deploy nginx to validate the cluster

cat >  nginx.yaml  << "EOF"
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-controller
spec:
  replicas: 2
  selector:
    name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.19.6
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service-nodeport
spec:
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30001
      protocol: TCP
  type: NodePort
  selector:
    name: nginx
EOF

Deploy

kubectl apply -f nginx.yaml
kubectl get svc
kubectl get pods -o wide

Access nginx to verify; the Service exposes NodePort 30001 on every node, for example:
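
curl http://192.168.128.203:30001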

Delete nginx

kubectl delete -f nginx.yaml

Author: 这里有鱼

Original (in Chinese): https://www.jianshu.com/p/b02c428950df
