
Kubernetes High-Availability Binary Deployment

GoDan

Official kubectl command reference
http://kubernetes.kansea.com/docs/user-guide/kubectl/kubectl_config_set-cluster/

Host information

Hostname   IP              Specs   Components
master1    172.16.48.133   2c2g    kube-apiserver, kube-controller-manager, kube-scheduler, etcd, haproxy, keepalived
master2    172.16.48.134   2c2g    kube-apiserver, kube-controller-manager, kube-scheduler, etcd, haproxy, keepalived
master3    172.16.48.135   2c2g    kube-apiserver, kube-controller-manager, kube-scheduler, etcd, haproxy, keepalived
node1      172.16.48.131   2c2g    kubelet, kube-proxy
node2      172.16.48.132   2c2g    kubelet, kube-proxy
VIP        172.16.48.200           keepalived + haproxy

Software versions

Software                                                                       Version
OS                                                                             CentOS Linux release 7.9.2009 (kernel 3.10)
kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy   v1.23.0
etcd                                                                           v3.4.9
docker                                                                         20.10.16

IP allocation

Service        CIDR / IP
service CIDR   10.50.0.0/16
pod CIDR       10.200.0.0/16
cluster DNS    10.50.0.10
service IP     10.50.0.1

System initialization (all nodes)

Generate an SSH key pair on one host first; files will then be copied between the hosts using this key.

[root@master ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:3gzGdTcZIjtcFhRkopGywbdf9ULCL3fDlEQyHJMtptI root@master
The key's randomart image is:
+---[RSA 2048]----+
|     .  ..o+@BBo.|
|      + o+ Bo*** |
|       =..* +=B. |
|      ...o Eo.++o|
|        S... o o.|
|       o +.      |
|        . o      |
|                 |
|                 |
+----[SHA256]-----+
[root@master ~]# cat .ssh/id_rsa.pub
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxbOjE9T1JfGLgQ8CoioeJYurQvIXJ6AlOiMyI6CDVjruoa8d8bIXOQBFn0ik1/TvZwnxRq4kgE2OyTE8FCyDRoG9JXV2DgDyMwCuk/RhkUbEnBVa5Q4feRA6MTIw12Ggu+myEI2x8AhlYuxeZIsYy0z3nlIchIYYu9vsLZY0PCTPiKMVyPx48o3ahk8jVvRddye208dADN1Q+sMudO97aKfrZU+dcqPFIpXSLq7JHqxycogyaz+Vf5OIyEgVmJf/Bf806qPsObsGLC57NKbToT9KVDd3EI4NljI1NLU2/QMViSxbueEIMD1rO5jnTmG6oIh++M7WGm0F2dBRzS4ax root@master

Run on all nodes

If you use iTerm2, Command+Shift+I lets you send the commands to multiple panes at once.

# Create the .ssh directory and append the public key
mkdir -p /root/.ssh
echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxbOjE9T1JfGLgQ8CoioeJYurQvIXJ6AlOiMyI6CDVjruoa8d8bIXOQBFn0ik1/TvZwnxRq4kgE2OyTE8FCyDRoG9JXV2DgDyMwCuk/RhkUbEnBVa5Q4feRA6MTIw12Ggu+myEI2x8AhlYuxeZIsYy0z3nlIchIYYu9vsLZY0PCTPiKMVyPx48o3ahk8jVvRddye208dADN1Q+sMudO97aKfrZU+dcqPFIpXSLq7JHqxycogyaz+Vf5OIyEgVmJf/Bf806qPsObsGLC57NKbToT9KVDd3EI4NljI1NLU2/QMViSxbueEIMD1rO5jnTmG6oIh++M7WGm0F2dBRzS4ax root@master' >> /root/.ssh/authorized_keys
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
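# Optional check (not in the original article): confirm passwordless SSH from the
# key-holding node to every other host before continuing.
for h in 172.16.48.134 172.16.48.135 172.16.48.131 172.16.48.132; do
  ssh -o BatchMode=yes root@$h hostname
done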

# Install required packages
yum -y install wget conntrack ipvsadm ipset jq sysstat curl iptables libseccomp vim

# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

# Reset iptables
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT

# Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config # permanent
setenforce 0 # immediate

# Disable swap
swapoff -a # immediate
sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent

# Set the planned hostname on each node
hostnamectl set-hostname <hostname>

# Add /etc/hosts entries
cat >> /etc/hosts << EOF
172.16.48.135 master3
172.16.48.134 master2
172.16.48.133 master
172.16.48.132 node2
172.16.48.131 node3
EOF

# Pass bridged IPv4 traffic to the iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward=1
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
EOF
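# Note (extra step, not in the original article): the bridge-nf-call keys only
# exist once the br_netfilter kernel module is loaded; load it before applying.
modprobe br_netfilter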
sysctl -p /etc/sysctl.d/k8s.conf # apply

# Time synchronization
yum -y install ntpdate
ntpdate time.windows.com

Deploying high availability (on the master nodes)

Install packages

yum install keepalived haproxy -y

Configure HAProxy load balancing

# Back up the original file
cp  /etc/haproxy/haproxy.cfg   /etc/haproxy/haproxy.cfg.bak
# Write the configuration
cat > /etc/haproxy/haproxy.cfg << EOF
global
    maxconn 20480 # default maximum number of connections
    ulimit-n 82000 # maximum number of open file descriptors per process
    log 127.0.0.1 local0 err # haproxy log output settings
    stats timeout 30s

defaults
    log global # reuse the log settings defined in global
    mode http # processing mode (http = layer 7 proxy, tcp = layer 4 proxy)
    option httplog # use the http log format
    timeout connect 10s # default connect timeout
    timeout client 1m # default client timeout
    timeout server 1m # default server timeout
    timeout http-request 15s # default http request timeout
    timeout http-keep-alive 15s # default keep-alive timeout
    timeout check 10s # health-check timeout
    balance roundrobin # default load-balancing algorithm: round robin

frontend k8s-master
    bind 0.0.0.0:16443
    mode tcp
    option tcplog
    tcp-request inspect-delay 5s
    default_backend k8s-master

backend k8s-master
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    server  master1  172.16.48.133:6443 check
    server  master2  172.16.48.134:6443 check
    server  master3  172.16.48.135:6443 check

listen status # combined frontend/backend for the stats page; name it as you like
         bind 0.0.0.0:8888 # listen port
         mode http # layer 7 http mode
         log 127.0.0.1 local3 err # error logging
         stats refresh 5s # refresh the stats page every 5 seconds
         stats uri /admin?stats # URL path of the stats page
         stats realm itnihao\ welcome # prompt shown on the stats page
         stats auth admin:admin # stats page user and password; multiple users can be configured
         stats hide-version # hide the HAProxy version on the stats page
         stats admin if TRUE # allow enabling/disabling backend servers from the page (haproxy >= 1.4.9)
EOF
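Before starting the service, the configuration can be syntax-checked (a quick sanity check, not part of the original steps):

haproxy -c -f /etc/haproxy/haproxy.cfg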

Configure keepalived

# Back up the original file
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
# Write the configuration
# master1 configuration:
cat > /etc/keepalived/keepalived.conf << EOF
global_defs {
   router_id k8s-master # host identifier, used in notifications
   script_user root # user (and optionally group) that runs scripts; defaults to the keepalived_script user if it exists, otherwise root
   enable_script_security # do not run scripts as root if their path is writable by non-root users
}
vrrp_script chk_apiserver { # VRRP script declaration
   script "/etc/keepalived/check_apiserver.sh" # script executed periodically
   interval 5 # interval between runs, in seconds
   weight -5 # weight: subtracted from priority when the check fails; the result should fall below the backup nodes' priority
   fall 3 # number of consecutive failures before the check is considered failed
   rise 2 # number of consecutive successes before the check is considered healthy again
}
vrrp_instance VI_1 { # virtual router name; one keepalived instance can run several, each with a unique name
   state MASTER  # initial state of this node for this virtual router, MASTER or BACKUP; the final role is still decided by priority
   interface ens33 # physical interface bound to this virtual router, e.g. ens33 or eth0
   virtual_router_id 172 # unique virtual router ID, 0-255; must match on every node of the same virtual router
   priority 100 # priority of this node for this virtual router, 1-254; must differ between keepalived nodes
   advert_int 2  # VRRP advertisement interval, default 1s

   authentication { # authentication
       auth_type PASS
       auth_pass 88888888 # pre-shared key, only the first 8 characters are used; must match on all nodes of this virtual router
   }

   virtual_ipaddress { # virtual IP
       172.16.48.200 # the VIP
   }
   track_script { # track the script's state
      chk_apiserver
   }
}
EOF

# master2 configuration:
cat > /etc/keepalived/keepalived.conf << EOF
global_defs {
   router_id k8s-master
   script_user root
   enable_script_security
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 5
   weight -5
   fall 3 
   rise 2
}
vrrp_instance VI_1 {
   state BACKUP
   interface ens33
   virtual_router_id 172
   priority 99
   advert_int 2
   authentication {
       auth_type PASS
       auth_pass 88888888
   }
   virtual_ipaddress {
       172.16.48.200
   }
   track_script {
      chk_apiserver
   }
}
EOF

# master3 configuration:
cat > /etc/keepalived/keepalived.conf << EOF
global_defs {
   router_id k8s-master
   script_user root
   enable_script_security
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 5
   weight -5
   fall 3 
   rise 2
}
vrrp_instance VI_1 {
   state BACKUP
   interface ens33
   virtual_router_id 172
   priority 98
   advert_int 2
   authentication {
       auth_type PASS
       auth_pass 88888888
   }
   virtual_ipaddress {
       172.16.48.200
   }
   track_script {
      chk_apiserver
   }
}
EOF

Health-check script

cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
function errorExit() {
   exit 1
}
curl --silent --max-time 2 --insecure https://localhost:6443/ -o /dev/null || errorExit
curl --silent --max-time 2 --insecure https://172.16.48.200:6443/ -o /dev/null || errorExit
EOF
# Make the script executable
chmod +x /etc/keepalived/check_apiserver.sh
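The script can be run by hand to confirm it behaves as expected; a non-zero exit code at this point simply means the apiserver is not reachable yet:

bash /etc/keepalived/check_apiserver.sh; echo "exit code: $?"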

Start the services

systemctl enable --now haproxy
systemctl enable --now keepalived

Check status / logs

# On master1 the VIP should be visible
ip addr
# Test from every node
ping 172.16.48.200 -c 3
# Check service status
systemctl status haproxy
systemctl status keepalived
# Follow the logs
journalctl -f -u haproxy
journalctl -f -u keepalived
# Access test
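# The HAProxy stats page defined above can serve as the access test
# (port, path and admin:admin credentials come from the "listen status" section):
curl -u admin:admin "http://172.16.48.200:8888/admin?stats"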

Operations on the master nodes

etcd cluster

Create certificates

Download the cfssl tools

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64 
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

Create the certificate directory

mkdir -p /data/k8s/cert
cd /data/k8s/cert

Create the CA certificate signing request (CSR) file

cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "kubernetes",
            "OU": "system"
        }
    ]
}
EOF

Generate the CA certificate and private key

[root@master cert]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca 
[root@master cert]# ls
ca.csr  ca-csr.json  ca-key.pem  ca.pem

Create the CA signing policy

cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

Create the etcd CSR file

Notes:
CN (Common Name): the apiserver extracts this field from the certificate and uses it as the request user name.
O (Organization): the apiserver extracts this field and uses it as the group the requesting user belongs to.

cat > etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
    "172.16.48.133",
    "172.16.48.134",
    "172.16.48.135"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
EOF

Issue the etcd HTTPS certificate with the self-signed CA

[root@master cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
[root@master cert]# ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem  etcd.csr  etcd-csr.json  etcd-key.pem  etcd.pem

Configure and start etcd

Download the package

Download URL: https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz

Copy the binaries

# Download
wget https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
# Unpack
tar -zxvf etcd-v3.4.9-linux-amd64.tar.gz
# Create the etcd directories
mkdir /data/app/etcd/{cfg,bin,ssl} -p
# Copy the binaries
mv etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /data/app/etcd/bin

Create the etcd configuration file

# Create the data directory
mkdir /data/db/etcd/
cd /data/app/etcd/cfg/

cat > /data/app/etcd/cfg/etcd.conf  << EOF
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/data/db/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://172.16.48.133:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.16.48.133:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.48.133:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://172.16.48.133:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://172.16.48.133:2380,etcd02=https://172.16.48.134:2380,etcd03=https://172.16.48.135:2380"

ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

Parameter notes:

  • ETCD_NAME: member name, unique per node
  • ETCD_DATA_DIR: data directory (etcd is a disk-backed database, not in-memory; all Kubernetes state ends up in etcd)
  • ETCD_LISTEN_PEER_URLS: listen address for cluster (peer) traffic
  • ETCD_LISTEN_CLIENT_URLS: listen address for client traffic
  • ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster
  • ETCD_ADVERTISE_CLIENT_URLS: client address advertised to clients
  • ETCD_INITIAL_CLUSTER: addresses of all cluster members
  • ETCD_INITIAL_CLUSTER_TOKEN: cluster token
  • ETCD_INITIAL_CLUSTER_STATE: state when joining; "new" for a new cluster, "existing" to join an existing one
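For reference, only the member name and the three local URLs change on the other members; master2's etcd.conf would look like this (a sketch derived from the cluster list above):

#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/data/db/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://172.16.48.134:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.16.48.134:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.48.134:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://172.16.48.134:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://172.16.48.133:2380,etcd02=https://172.16.48.134:2380,etcd03=https://172.16.48.135:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"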

Create the etcd systemd unit

cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/data/app/etcd/cfg/etcd.conf
ExecStart=/data/app/etcd/bin/etcd \\
 --cert-file=/data/app/etcd/ssl/etcd.pem \\
 --key-file=/data/app/etcd/ssl/etcd-key.pem \\
 --peer-cert-file=/data/app/etcd/ssl/etcd.pem \\
 --peer-key-file=/data/app/etcd/ssl/etcd-key.pem \\
 --trusted-ca-file=/data/app/etcd/ssl/ca.pem \\
 --peer-trusted-ca-file=/data/app/etcd/ssl/ca.pem \\
 --logger=zap
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Copy the certificates generated earlier to the paths referenced by the unit file

cp /data/k8s/cert/ca*pem /data/app/etcd/ssl/
cp /data/k8s/cert/etcd*pem /data/app/etcd/ssl/

Copy to the other master nodes

scp -P 22 -r /data/app/etcd/ root@master2:/data/app/
scp -P 22 -r /data/app/etcd/ root@master3:/data/app/
scp -P 22 /usr/lib/systemd/system/etcd.service  root@master2:/usr/lib/systemd/system/
scp -P 22 /usr/lib/systemd/system/etcd.service  root@master3:/usr/lib/systemd/system/
# On master2 and master3, change the IP addresses and the etcd name in the /data/app/etcd/cfg configuration file
Possible error message:
scp: /data/app/: No such file or directory

Create the /data/app directory manually on master2 and master3

mkdir /data/app -p

Start the etcd service

systemctl daemon-reload
systemctl enable --now etcd
# Follow the logs
journalctl -f -u etcd

Check cluster status

# Option 1
[root@master3 cfg]# ETCDCTL_API=3 /data/app/etcd/bin/etcdctl --write-out=table --cacert=/data/app/etcd/ssl/ca.pem --cert=/data/app/etcd/ssl/etcd.pem --key=/data/app/etcd/ssl/etcd-key.pem --endpoints=https://172.16.48.133:2379,https://172.16.48.134:2379,https://172.16.48.135:2379 endpoint health
+----------------------------+--------+-------------+-------+
|          ENDPOINT          | HEALTH |    TOOK     | ERROR |
+----------------------------+--------+-------------+-------+
| https://172.16.48.133:2379 |   true | 20.604842ms |       |
| https://172.16.48.134:2379 |   true | 20.273784ms |       |
| https://172.16.48.135:2379 |   true | 24.409651ms |       |
+----------------------------+--------+-------------+-------+
# Option 2
[root@master3 cfg]# ETCDCTL_API=3 /data/app/etcd/bin/etcdctl --cacert=/data/app/etcd/ssl/ca.pem --cert=/data/app/etcd/ssl/etcd.pem --key=/data/app/etcd/ssl/etcd-key.pem --endpoints="https://172.16.48.133:2379,https://172.16.48.134:2379,https://172.16.48.135:2379" endpoint health
https://172.16.48.133:2379 is healthy: successfully committed proposal: took = 18.331765ms
https://172.16.48.135:2379 is healthy: successfully committed proposal: took = 21.549524ms
https://172.16.48.134:2379 is healthy: successfully committed proposal: took = 20.135804ms
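The member list and current leader can be checked the same way (optional, same certificates and endpoints):

ETCDCTL_API=3 /data/app/etcd/bin/etcdctl --write-out=table --cacert=/data/app/etcd/ssl/ca.pem --cert=/data/app/etcd/ssl/etcd.pem --key=/data/app/etcd/ssl/etcd-key.pem --endpoints=https://172.16.48.133:2379,https://172.16.48.134:2379,https://172.16.48.135:2379 endpoint status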

kube-apiserver

Create the service directories (on all three master nodes)

mkdir /data/app/kubernetes/{bin,cfg,ssl,logs} -p

Download the binaries

Official download page: https://github.com/kubernetes/kubernetes/releases

wget https://dl.k8s.io/v1.23.0/kubernetes-server-linux-amd64.tar.gz
tar -xvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
cp kube-apiserver kube-controller-manager kubectl kubelet kube-proxy kube-scheduler /data/app/kubernetes/bin/

Update the environment variables

vim /etc/profile
PATH=/data/app/kubernetes/bin/:$PATH
source /etc/profile

Certificate request

Create the certificate directory

⚠️ The etcd / apiserver / kubelet CA certificates should not be reused; create a separate CA certificate for each.

mkdir -p /data/k8s/cert/
cd /data/k8s/cert/

Issue the kube-apiserver HTTPS certificate with the self-signed CA

List the IP addresses of every cluster node, including the VIP; the last IP is the service address, normally the first IP of the cluster service CIDR.

cat > kube-apiserver-csr.json <<  EOF
{
"CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "172.16.48.133",
    "172.16.48.134",
    "172.16.48.135",
    "172.16.48.131",
    "172.16.48.132",
    "172.16.48.200",
    "10.50.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubernetes",
      "OU": "system"
    }
  ]
}
EOF

Generate the certificate

[root@master cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
[root@master cert]# ls kube-apiserver*
kube-apiserver.csr  kube-apiserver-csr.json  kube-apiserver-key.pem  kube-apiserver.pem

Copy to the service directory

cp /data/k8s/cert/ca*pem /data/app/kubernetes/ssl/
cp /data/k8s/cert/kube-apiserver*pem /data/app/kubernetes/ssl/

Enable the TLS Bootstrapping mechanism and generate the token file

TLS Bootstrapping: once the apiserver enables TLS authentication, the kubelet and kube-proxy on every node must present valid CA-signed certificates to talk to kube-apiserver. With many nodes, issuing these client certificates by hand is a lot of work and complicates scaling the cluster. To simplify this, Kubernetes provides TLS bootstrapping to issue client certificates automatically: the kubelet requests a certificate from the apiserver as a low-privileged user, and the apiserver signs it dynamically.
This approach is strongly recommended for the nodes; it is currently used mainly for kubelet, while kube-proxy still gets a certificate that we issue ourselves.

cat > /data/app/kubernetes/cfg/token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
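The resulting file is a single CSV line in the form token,user,uid,"group"; a quick sanity check (the token value is random and will differ):

cat /data/app/kubernetes/cfg/token.csv
# expected format: <32-char-hex-token>,kubelet-bootstrap,10001,"system:node-bootstrapper"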

Create the kube-apiserver configuration file

After copying it to the other master nodes, change the bind/advertise IP 172.16.48.133 accordingly.

cd /data/app/kubernetes/cfg
cat > kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
  --anonymous-auth=false \\
  --bind-address=172.16.48.133 \\
  --secure-port=6443 \\
  --advertise-address=172.16.48.133 \\
  --insecure-port=0 \\
  --authorization-mode=Node,RBAC \\
  --runtime-config=api/all=true \\
  --enable-bootstrap-token-auth \\
  --service-cluster-ip-range=10.50.0.0/16 \\
  --token-auth-file=/data/app/kubernetes/cfg/token.csv \\
  --service-node-port-range=30000-50000 \\
  --tls-cert-file=/data/app/kubernetes/ssl/kube-apiserver.pem  \\
  --tls-private-key-file=/data/app/kubernetes/ssl/kube-apiserver-key.pem \\
  --client-ca-file=/data/app/kubernetes/ssl/ca.pem \\
  --kubelet-client-certificate=/data/app/kubernetes/ssl/kube-apiserver.pem \\
  --kubelet-client-key=/data/app/kubernetes/ssl/kube-apiserver-key.pem \\
  --service-account-key-file=/data/app/kubernetes/ssl/ca-key.pem \\
  --service-account-signing-key-file=/data/app/kubernetes/ssl/ca-key.pem  \\
  --service-account-issuer=api \\
  --etcd-cafile=/data/app/etcd/ssl/ca.pem \\
  --etcd-certfile=/data/app/etcd/ssl/etcd.pem \\
  --etcd-keyfile=/data/app/etcd/ssl/etcd-key.pem \\
  --etcd-servers=https://172.16.48.133:2379,https://172.16.48.134:2379,https://172.16.48.135:2379 \\
  --enable-swagger-ui=true \\
  --allow-privileged=true \\
  --apiserver-count=3 \\
  --audit-log-maxage=30 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-path=/data/app/kubernetes/logs/kube-apiserver-audit.log \\
  --event-ttl=1h \\
  --alsologtostderr=true \\
  --logtostderr=false \\
  --log-dir=/data/app/kubernetes/logs \\
  --v=4"
EOF

Flag notes

  • --logtostderr: log to stderr
  • --v: log verbosity
  • --log-dir: log directory
  • --etcd-servers: etcd cluster addresses
  • --bind-address: listen address
  • --secure-port: https secure port
  • --advertise-address: address advertised to the cluster
  • --allow-privileged: allow privileged containers
  • --service-cluster-ip-range: Service virtual IP range
  • --enable-admission-plugins: admission control plugins
  • --authorization-mode: authorization modes; enable RBAC and Node self-management
  • --enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
  • --token-auth-file: bootstrap token file
  • --service-node-port-range: default NodePort range for Services
  • --kubelet-client-xxx: client certificate used by the apiserver to reach the kubelet
  • --tls-xxx-file: apiserver https certificate
  • --etcd-xxxfile: certificates for connecting to the etcd cluster
  • --audit-log-xxx: audit log settings

Create the systemd unit

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF 
[Unit]
Description=Kubernetes API Server 
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service

[Service]
EnvironmentFile=/data/app/kubernetes/cfg/kube-apiserver.conf 
ExecStart=/data/app/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS 
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

Copy to the other master nodes

scp -P 22 -r /data/app/kubernetes/ root@master2:/data/app/
scp -P 22 -r /data/app/kubernetes/ root@master3:/data/app/

scp -P 22  /usr/lib/systemd/system/kube-apiserver.service root@master2:/usr/lib/systemd/system/
scp -P 22  /usr/lib/systemd/system/kube-apiserver.service root@master3:/usr/lib/systemd/system/

Start the service

systemctl daemon-reload
systemctl enable --now kube-apiserver
# Check status
systemctl status kube-apiserver
# Follow the logs
journalctl -f -u kube-apiserver

Test access (the 401 Unauthorized responses below are expected: no client certificate or token is presented yet)

[root@master cfg]# curl --insecure https://master:6443/
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}
[root@master cfg]# curl --insecure https://master2:6443/
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}
[root@master cfg]# curl --insecure https://master3:6443/
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}
[root@master cfg]# curl --insecure https://172.16.48.200:16443/
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}

kubectl

kubectl talks to the apiserver over the secure HTTPS port, and the apiserver authenticates and authorizes it based on the certificate it presents. As the cluster administration tool, kubectl needs to be granted the highest privileges.

Certificate request

Issue the kubectl HTTPS certificate with the self-signed CA

cd  /data/k8s/cert/
cat > kubectl-csr.json << EOF
{
    "CN": "admin",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
            "O": "system:masters",
            "OU": "system"
        }
    ]
}
EOF

Generate the certificate

[root@master cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubectl-csr.json | cfssljson -bare kubectl
[root@master cert]# ls kubectl*
kubectl.csr  kubectl-csr.json  kubectl-key.pem  kubectl.pem

Generate the kubeconfig file

The kubeconfig file is kubectl's configuration file; it contains everything needed to reach the apiserver: the apiserver address, the CA certificate, and the client certificate it uses.

cd /data/k8s/cert
# Set cluster parameters (the VIP address should be used here; this example points at master1 directly)
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://172.16.48.133:6443 --kubeconfig=kube.config
# Set client authentication parameters
kubectl config set-credentials admin --client-certificate=kubectl.pem --client-key=kubectl-key.pem --embed-certs=true --kubeconfig=kube.config
# Set context parameters
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
# Set the default context
kubectl config use-context kubernetes --kubeconfig=kube.config
mkdir -p ~/.kube
cp kube.config ~/.kube/config

Bind the kubectl user to a cluster role

kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=~/.kube/config

kubectl get clusterrolebinding # list the bindings

Verification

# Show cluster information
[root@master cert]# kubectl cluster-info
Kubernetes control plane is running at https://172.16.48.133:6443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
# List resources in all namespaces
[root@master cert]# kubectl get all -A
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.50.0.1    <none>        443/TCP   99m
# Check the control-plane components
[root@master cert]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                        ERROR
scheduler            Unhealthy   Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused
controller-manager   Unhealthy   Get "https://127.0.0.1:10257/healthz": dial tcp 127.0.0.1:10257: connect: connection refused
etcd-0               Healthy     {"health":"true"}

Copy to the other master nodes

scp -P 22 -r ~/.kube/config root@master2:~/.kube/
scp -P 22 -r ~/.kube/config root@master3:~/.kube/

kube-controller-manager

Certificate request

Issue the HTTPS certificate with the self-signed CA

cd  /data/k8s/cert/
cat > kube-controller-manager-csr.json << EOF
{
    "CN": "system:kube-controller-manager",
    "hosts": [
        "127.0.0.1",
        "172.16.48.133",
        "172.16.48.134",
        "172.16.48.135",
        "172.16.48.200"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
            "O": "system:kube-controller-manager",
            "OU": "system"
        }
    ]
}
EOF

Notes:
hosts lists the IP addresses of the nodes where kube-controller-manager runs.
O: the built-in ClusterRoleBinding system:kube-controller-manager grants the system:kube-controller-manager group the permissions it needs.

Generate the certificate

[root@master cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
[root@master cert]# ls kube-controller-manager*
kube-controller-manager.csr       kube-controller-manager-key.pem
kube-controller-manager-csr.json  kube-controller-manager.pem

Generate the kubeconfig file

The kubeconfig file contains everything needed to reach the apiserver: the apiserver address, the CA certificate, and the client certificate.

# Set cluster parameters (the VIP address should be used here; this example points at master1 directly)
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://172.16.48.133:6443 --kubeconfig=kube-controller-manager.kubeconfig
# Set client authentication parameters
kubectl config set-credentials kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
# Set context parameters
kubectl config set-context default --cluster=kubernetes --user=kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig

Copy to the service directory

cp /data/k8s/cert/kube-controller-manager*.pem /data/app/kubernetes/ssl/
cp /data/k8s/cert/kube-controller-manager.kubeconfig /data/app/kubernetes/ssl/

Create the configuration file

  • --master: connect to the apiserver through the local insecure port 8080 (not used in this setup)
  • --leader-elect: enable leader election when several instances run (HA)
  • --cluster-signing-cert-file / --cluster-signing-key-file: the CA that automatically signs kubelet certificates, consistent with the apiserver
cd /data/app/kubernetes/cfg
cat > kube-controller-manager.conf << EOF 
KUBE_CONTROLLER_MANAGER_OPTS="--secure-port=10257 \\
--v=2 \\
--log-dir=/data/app/kubernetes/logs \\
--kubeconfig=/data/app/kubernetes/ssl/kube-controller-manager.kubeconfig \\
--leader-elect=true \\
--allocate-node-cidrs=true \\
--cluster-name=kubernetes \\
--cluster-cidr=10.200.0.0/16 \\
--service-cluster-ip-range=10.50.0.0/16 \\
--cluster-signing-cert-file=/data/app/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/data/app/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/data/app/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/data/app/kubernetes/ssl/ca-key.pem \\
--tls-cert-file=/data/app/kubernetes/ssl/kube-controller-manager.pem \\
--tls-private-key-file=/data/app/kubernetes/ssl/kube-controller-manager-key.pem \\
--feature-gates=RotateKubeletServerCertificate=true \\
--controllers=*,bootstrapsigner,tokencleaner \\
--horizontal-pod-autoscaler-sync-period=10s \\
--use-service-account-credentials=true \\
--alsologtostderr=true \\
--logtostderr=false \\
--experimental-cluster-signing-duration=87600h"
EOF

Create the systemd unit

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF 
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/data/app/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/data/app/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Copy to the other master nodes

scp -P 22 -r /data/app/kubernetes/ssl/kube-controller-manager*.pem root@master2:/data/app/kubernetes/ssl/
scp -P 22 -r /data/app/kubernetes/ssl/kube-controller-manager*.pem root@master3:/data/app/kubernetes/ssl/

scp -P 22 -r /data/app/kubernetes/ssl/kube-controller-manager.kubeconfig root@master2:/data/app/kubernetes/ssl/
scp -P 22 -r /data/app/kubernetes/ssl/kube-controller-manager.kubeconfig root@master3:/data/app/kubernetes/ssl/

scp -P 22 -r /data/app/kubernetes/cfg/kube-controller-manager.conf root@master2:/data/app/kubernetes/cfg/
scp -P 22 -r /data/app/kubernetes/cfg/kube-controller-manager.conf root@master3:/data/app/kubernetes/cfg/

scp -P 22  /usr/lib/systemd/system/kube-controller-manager.service root@master2:/usr/lib/systemd/system/
scp -P 22  /usr/lib/systemd/system/kube-controller-manager.service root@master3:/usr/lib/systemd/system/

Start the service

systemctl daemon-reload
systemctl enable --now kube-controller-manager
# Check status
systemctl status kube-controller-manager
# Follow the logs
journalctl -f -u kube-controller-manager

Test access

[root@master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                        ERROR
scheduler            Unhealthy   Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused
controller-manager   Healthy     ok
etcd-0               Healthy     {"health":"true"}

kube-scheduler

Certificate request

Issue the HTTPS certificate with the self-signed CA

cd  /data/k8s/cert/
cat > kube-scheduler-csr.json << EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
        "127.0.0.1",
        "172.16.48.133",
        "172.16.48.134",
        "172.16.48.135",
        "172.16.48.200"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
            "O": "system:kube-scheduler",
            "OU": "system"
        }
    ]
}
EOF

Generate the certificate

[root@master cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
[root@master cert]# ls kube-scheduler*
kube-scheduler.csr  kube-scheduler-csr.json  kube-scheduler-key.pem  kube-scheduler.pem

Generate the kubeconfig file

The kubeconfig file contains everything needed to reach the apiserver: the apiserver address, the CA certificate, and the client certificate.

# Set cluster parameters (the VIP address should be used here; this example points at master1 directly)
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://172.16.48.133:6443 --kubeconfig=kube-scheduler.kubeconfig
# Set client authentication parameters
kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
# Set context parameters
kubectl config set-context default --cluster=kubernetes --user=kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
# Switch to the default context
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig

Copy to the service directory

cp /data/k8s/cert/kube-scheduler*.pem /data/app/kubernetes/ssl/
cp /data/k8s/cert/kube-scheduler.kubeconfig /data/app/kubernetes/ssl/

Create the configuration file

cd /data/app/kubernetes/cfg

cat > kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \\
  --kubeconfig=/data/app/kubernetes/ssl/kube-scheduler.kubeconfig \\
  --leader-elect=true \\
  --alsologtostderr=true \\
  --logtostderr=false \\
  --log-dir=/data/logs/kubernetes \\
  --v=4"
EOF

Create the systemd unit

cat > /usr/lib/systemd/system/kube-scheduler.service << EOF 
[Unit]
Description=Kubernetes scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target network-online.target
Wants=network-online.target

[Service]
EnvironmentFile=/data/app/kubernetes/cfg/kube-scheduler.conf
ExecStart=/data/app/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

Copy to the other master nodes

scp -P 22 -r /data/app/kubernetes/ssl/kube-scheduler.kubeconfig root@master2:/data/app/kubernetes/ssl/
scp -P 22 -r /data/app/kubernetes/ssl/kube-scheduler.kubeconfig root@master3:/data/app/kubernetes/ssl/

scp -P 22 -r /data/app/kubernetes/ssl/kube-scheduler*.pem root@master2:/data/app/kubernetes/ssl/
scp -P 22 -r /data/app/kubernetes/ssl/kube-scheduler*.pem root@master3:/data/app/kubernetes/ssl/

scp -P 22 -r /data/app/kubernetes/cfg/kube-scheduler.conf root@master2:/data/app/kubernetes/cfg/
scp -P 22 -r /data/app/kubernetes/cfg/kube-scheduler.conf root@master3:/data/app/kubernetes/cfg/

scp -P 22  /usr/lib/systemd/system/kube-scheduler.service root@master2:/usr/lib/systemd/system/
scp -P 22  /usr/lib/systemd/system/kube-scheduler.service root@master3:/usr/lib/systemd/system/

Start the service

systemctl daemon-reload
systemctl enable --now kube-scheduler
# Check status
systemctl status kube-scheduler
# Follow the logs
journalctl -f -u kube-scheduler

Test access

[root@master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}

Worker node operations

Because kubectl and the certificates live on the master nodes, the certificates and kubeconfig files are generated on a master node and then synchronized to the worker nodes.

Create the worker service directories

mkdir -p /data/app/kubernetes/{bin,cfg,ssl,logs}

Sync the kube binaries

cd kubernetes/server/bin/
scp kubelet root@node2:/data/app/kubernetes/bin
scp kubelet root@node3:/data/app/kubernetes/bin
scp kube-proxy root@node2:/data/app/kubernetes/bin
scp kube-proxy root@node3:/data/app/kubernetes/bin

Install Docker

Official documentation: https://docs.docker.com/engine/install/centos/

Set up the repository

Install the yum-utils package (which provides the yum-config-manager utility) and add the stable repository.

yum install -y yum-utils
yum-config-manager \
   --add-repo \
   https://download.docker.com/linux/centos/docker-ce.repo

Install the Docker engine

Install the latest version of Docker Engine and containerd.

yum -y install docker-ce docker-ce-cli containerd.io

Uninstall (if needed)

yum -y remove docker-ce docker-ce-cli containerd.io
rm -rf /var/lib/docker 

Start

systemctl enable --now docker

Configure the Docker data directory and other options

Adjust as needed; the following two settings are required:
"registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
"exec-opts": ["native.cgroupdriver=systemd"]

cat > /etc/docker/daemon.json << EOF 
{
  "graph": "/data/docker",
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF
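Restart Docker afterwards so the new options take effect, and confirm the cgroup driver (a quick check):

systemctl daemon-reload
systemctl restart docker
docker info | grep -i cgroup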

kubelet

On a master node, authorize the kubelet-bootstrap user to request certificates

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

Generate the bootstrap kubeconfig on a master node

cd /data/app/kubernetes/ssl

BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /data/app/kubernetes/cfg/token.csv)

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://172.16.48.133:6443 --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

Worker node kubelet parameter file

Adjust the IP addresses where appropriate.

cat > /data/app/kubernetes/cfg/kubelet-config.yml << EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /data/app/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.50.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /data/app/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF

Create the kubelet configuration file on the worker nodes

cd /data/app/kubernetes/cfg

cat > kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
  --v=2 \\
  --log-dir=/data/logs/kubernetes \\
  --network-plugin=cni \\
  --kubeconfig=/data/app/kubernetes/ssl/kubelet.kubeconfig \\
  --bootstrap-kubeconfig=/data/app/kubernetes/ssl/kubelet-bootstrap.kubeconfig \\
  --config=/data/app/kubernetes/cfg/kubelet-config.yml \\
  --cert-dir=/data/app/kubernetes/ssl \\
  --pod-infra-container-image=kubernetes/pause"
EOF

Notes

  • --hostname-override: node display name, unique within the cluster
  • --network-plugin: enable CNI
  • --kubeconfig: empty path; generated automatically and later used to connect to the apiserver
  • --bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
  • --config: parameter configuration file
  • --cert-dir: directory where kubelet certificates are generated
  • --pod-infra-container-image: image for the Pod network (pause) container

Create the systemd unit

cat > /usr/lib/systemd/system/kubelet.service << EOF 
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/data/app/kubernetes/cfg/kubelet.conf
ExecStart=/data/app/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
RestartSec=5
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
EOF

Sync to the worker nodes

scp /data/app/kubernetes/ssl/ca.pem root@node2:/data/app/kubernetes/ssl
scp /data/app/kubernetes/ssl/ca.pem root@node3:/data/app/kubernetes/ssl

scp /data/app/kubernetes/ssl/kubelet-bootstrap.kubeconfig root@node2:/data/app/kubernetes/ssl
scp /data/app/kubernetes/ssl/kubelet-bootstrap.kubeconfig root@node3:/data/app/kubernetes/ssl

scp /data/app/kubernetes/cfg/kubelet-config.yml root@node2:/data/app/kubernetes/cfg
scp /data/app/kubernetes/cfg/kubelet-config.yml root@node3:/data/app/kubernetes/cfg

scp /data/app/kubernetes/cfg/kubelet.conf root@node2:/data/app/kubernetes/cfg
scp /data/app/kubernetes/cfg/kubelet.conf root@node3:/data/app/kubernetes/cfg

scp /usr/lib/systemd/system/kubelet.service root@node2:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/kubelet.service root@node3:/usr/lib/systemd/system/

Start the service

systemctl daemon-reload
systemctl enable --now kubelet
# Check status
systemctl status kubelet
# Follow the logs
journalctl -f -u kubelet

Approve the CSR on a master node

Note: since the network plugin is not deployed yet, the node will show NotReady.
Before approval the CSR status is Pending;
after approval it is Approved,Issued.

# List the kubelet certificate requests
[root@master ssl]# kubectl get csr
NAME                                                   AGE   SIGNERNAME                                    REQUESTOR           REQUESTEDDURATION   CONDITION
node-csr-0cd5ulaYThPu9Vw_2fQDUSjZlj6E3dI4Y5BmVCkZk1E   21s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   <none>              Pending
# Approve it
[root@master ssl]# kubectl certificate approve node-csr-0cd5ulaYThPu9Vw_2fQDUSjZlj6E3dI4Y5BmVCkZk1E
certificatesigningrequest.certificates.k8s.io/node-csr-0cd5ulaYThPu9Vw_2fQDUSjZlj6E3dI4Y5BmVCkZk1E approved
# Check again
[root@master ssl]# kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           REQUESTEDDURATION   CONDITION
node-csr-0cd5ulaYThPu9Vw_2fQDUSjZlj6E3dI4Y5BmVCkZk1E   3m49s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   <none>              Approved,Issued
# List the nodes
[root@master ssl]# kubectl get node
NAME     STATUS     ROLES    AGE   VERSION
master   NotReady   <none>   70s   v1.23.0

kube-proxy

Certificate request

Issue the HTTPS certificate with the self-signed CA

cd  /data/k8s/cert/
cat > kube-proxy-csr.json << EOF
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "Beijing",
            "L": "Beijing",
            "O": "kubernetes",
            "OU": "system"
        }
    ]
}
EOF

Generate the certificate

[root@master cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
[root@master cert]# ls kube-proxy*
kube-proxy.csr  kube-proxy-csr.json  kube-proxy-key.pem  kube-proxy.pem

Generate the kubeconfig file

cd  /data/k8s/cert/
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://172.16.48.133:6443 --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Create the kube-proxy configuration file on the worker nodes

cd /data/app/kubernetes/cfg

cat > kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--config=/data/app/kubernetes/cfg/kube-proxy-config.yml \\
  --alsologtostderr=true \\
  --logtostderr=false \\
  --log-dir=/data/logs/kubernetes \\
  --v=2"
EOF

Worker node kube-proxy parameter file

Change the bind IP to match each node.

cd /data/app/kubernetes/cfg
cat > kube-proxy-config.yml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 172.16.48.133
clientConnection:
 kubeconfig: /data/app/kubernetes/ssl/kube-proxy.kubeconfig
clusterCIDR: 10.200.0.0/16
healthzBindAddress: 172.16.48.133:10256
kind: KubeProxyConfiguration
metricsBindAddress: 172.16.48.133:10249
mode: "ipvs"
EOF
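mode "ipvs" assumes the IPVS kernel modules are available on every worker (ipvsadm/ipset were installed during system initialization). Loading them explicitly is a safe extra step that is not in the original article:

for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh; do modprobe $m; done
modprobe nf_conntrack || modprobe nf_conntrack_ipv4  # module name differs on older kernels
lsmod | grep ip_vs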

Create the systemd unit

cat > /usr/lib/systemd/system/kube-proxy.service << EOF 
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service] 
EnvironmentFile=/data/app/kubernetes/cfg/kube-proxy.conf
ExecStart=/data/app/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS 
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Sync to the worker nodes

scp /data/app/kubernetes/ssl/kube-proxy*.pem root@node2:/data/app/kubernetes/ssl/
scp /data/app/kubernetes/ssl/kube-proxy*.pem root@node3:/data/app/kubernetes/ssl/

scp /data/app/kubernetes/ssl/kube-proxy.kubeconfig root@node2:/data/app/kubernetes/ssl/
scp /data/app/kubernetes/ssl/kube-proxy.kubeconfig root@node3:/data/app/kubernetes/ssl/

scp /data/app/kubernetes/cfg/kube-proxy-config.yml root@node2:/data/app/kubernetes/cfg
scp /data/app/kubernetes/cfg/kube-proxy-config.yml root@node3:/data/app/kubernetes/cfg


scp /usr/lib/systemd/system/kube-proxy.service root@node2:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/kube-proxy.service root@node3:/usr/lib/systemd/system/

Start the service

systemctl daemon-reload
systemctl enable --now kube-proxy
# Check status
systemctl status kube-proxy
# Follow the logs
journalctl -f -u kube-proxy

Install the Calico network plugin (on a master node)

curl https://docs.projectcalico.org/manifests/calico.yaml -O

Change CALICO_IPV4POOL_CIDR to the --cluster-cidr value used above.

Around lines 4434-4435 (line numbers vary by calico.yaml version), uncomment and set:
            - name: CALICO_IPV4POOL_CIDR
              value: "10.200.0.0/16"
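After editing, it is worth confirming the value was actually set (a simple check):

grep -A1 "CALICO_IPV4POOL_CIDR" calico.yaml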

Create the pods

[root@master cfg]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created

Verify

[root@master cfg]# kubectl get pod -A
NAMESPACE     NAME                                      READY   STATUS             RESTARTS   AGE
kube-system   calico-kube-controllers-6b77fff45-cmmrp   1/1     Running            0          3m37s
kube-system   calico-node-b29jl                         1/1     Running            0          3m37s

Install CoreDNS (on a master node)

Create the YAML manifest

In the kube-dns Service at the end of the manifest, set clusterIP to the DNS address allocated from your own service CIDR:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.50.0.10

cat > coredns.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.8.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 70Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.50.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
EOF

The content above is copied from the upstream CoreDNS template; the following placeholders were changed:

clusterIP: $DNS_SERVER_IP — the DNS service IP; it must match the clusterDNS value in kubelet-config.yml (10.50.0.10 here).

$DNS_DOMAIN — must match the domain used in the apiserver certificate hosts: cluster.local.

memory: $DNS_MEMORY_LIMIT — any value above the 70Mi request works (the author originally used 170Mi).

image: k8s.gcr.io/coredns/coredns:v1.8.6 was changed to image: coredns/coredns:1.8.6, since the upstream registry is unreachable; the registry mirror configured during the Docker installation is used instead.

Create the pods

[root@master cfg]# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created

Check status

[root@master cfg]# kubectl get svc -n kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.50.0.10   <none>        53/UDP,53/TCP,9153/TCP   4m42s

[root@master cfg]# kubectl get pod -A
NAMESPACE     NAME                                      READY   STATUS             RESTARTS   AGE
kube-system   calico-kube-controllers-6b77fff45-cmmrp   1/1     Running            0          5m51s
kube-system   calico-node-b29jl                         1/1     Running            0          5m51s
kube-system   coredns-84846f69f8-l96nj                  0/1     ImagePullBackOff   0          5m11s

[root@master cfg]# kubectl get pod -A -o wide
NAMESPACE     NAME                                      READY   STATUS             RESTARTS   AGE     IP              NODE     NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-6b77fff45-cmmrp   1/1     Running            0          9m57s   10.200.219.65   master   <none>           <none>
kube-system   calico-node-b29jl                         1/1     Running            0          9m57s   172.16.48.133   master   <none>           <none>
kube-system   coredns-84846f69f8-l96nj                  1/1     Running   0          9m17s   10.200.219.66   master   <none>           <none>

Verify

[root@master cfg]# dig -t a www.baidu.com @10.50.0.10

; <<>> DiG 9.11.4-P2-RedHat-9.11.4-26.P2.el7_9.9 <<>> -t a www.baidu.com @10.50.0.10
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 667
;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 1

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;www.baidu.com.			IN	A

;; ANSWER SECTION:
www.baidu.com.		30	IN	CNAME	www.a.shifen.com.
www.a.shifen.com.	30	IN	A	220.181.38.149
www.a.shifen.com.	30	IN	A	220.181.38.150

;; Query time: 12 msec
;; SERVER: 10.50.0.10#53(10.50.0.10)
;; WHEN: 四 5月 26 16:11:16 CST 2022
;; MSG SIZE  rcvd: 149
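DNS can also be verified from inside the cluster with a throwaway pod (the image and pod name here are just for illustration):

kubectl run dns-test --rm -it --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default.svc.cluster.local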

Create an nginx test

Create the Service

Write the nginx Service manifest

[root@master cfg]# cat nginx-svc.yaml
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: nginx-svc
  labels:
    name: nginx-svc
spec:
  type: NodePort
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    name: http
    nodePort: 30001
  selector:
    name: nginx-pod

Create it

kubectl create -f nginx-svc.yaml

Check it

[root@master cfg]# kubectl get svc -o wide
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE     SELECTOR
kubernetes   ClusterIP   10.50.0.1       <none>        443/TCP        20h     <none>
nginx-svc    NodePort    10.50.186.246   <none>        80:30001/TCP   5m30s   name=nginx-pod

Write the nginx ReplicationController manifest

[root@master cfg]# cat nginx-pod.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  namespace: default
  name: nginx-rc
  labels:
    name: nginx-rc
spec:
  replicas: 1
  selector:
    name: nginx-pod
  template:
    metadata:
      labels:
        name: nginx-pod
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80

Create it

kubectl create -f nginx-pod.yaml

Check it

[root@master cfg]# kubectl get pods -o wide
NAME             READY   STATUS    RESTARTS   AGE    IP              NODE     NOMINATED NODE   READINESS GATES
nginx-rc-6jh5z   1/1     Running   0          7m5s   10.200.219.70   master   <none>           <none>

Access

(Screenshot: the nginx default welcome page reached through the NodePort)
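Besides the browser test, the NodePort can be checked from the command line (30001 is the nodePort defined in the Service above):

# replace <node-ip> with the address of any node running kube-proxy
curl -I http://<node-ip>:30001/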
