目录
1.本机架构
主机名 | IP地址 | 角色 | 节点 |
---|---|---|---|
hdss7-21.host.com | 10.4.7.21 | kube-proxy | Node |
hdss7-22.host.com | 10.4.7.22 | kube-proxy | Node |
2.创建生成证书csr的json配置文件
在hdss7-200的主机上操作
# On hdss7-200: work inside the cluster certificate directory.
cd /opt/certs
# Write the CSR request for the kube-proxy client certificate.
# CN "system:kube-proxy" is the identity the certificate presents to the apiserver.
cat > kube-proxy-csr.json <<'eof'
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangZhou",
"L": "GuangZhou",
"O": "k8s",
"OU": "yw"
}
]
}
eof
3.生成kube-proxy的client证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client
[root@hdss7-200 certs]# ll kube-proxy-c*
-rw-r--r--. 1 root root 1009 9月 20 22:37 kube-proxy-client.csr
-rw-------. 1 root root 1675 9月 20 22:37 kube-proxy-client-key.pem
-rw-r--r--. 1 root root 1387 9月 20 22:37 kube-proxy-client.pem
-rw-r--r--. 1 root root 271 9月 20 22:37 kube-proxy-csr.json
4.拷贝证书
# Distribute the signed client cert and its private key to both node hosts.
scp /opt/certs/kube-proxy-client.pem hdss7-21:/opt/kubernetes/server/bin/certs
scp /opt/certs/kube-proxy-client-key.pem hdss7-21:/opt/kubernetes/server/bin/certs
scp /opt/certs/kube-proxy-client.pem hdss7-22:/opt/kubernetes/server/bin/certs
scp /opt/certs/kube-proxy-client-key.pem hdss7-22:/opt/kubernetes/server/bin/certs
5.创建配置
在hdss7-21.host.com上操作
- 5.1 set-cluster
# Run all kubeconfig steps from the conf directory so the generated
# kube-proxy.kubeconfig lands next to the other node configs.
cd /opt/kubernetes/server/bin/conf
# Register cluster "myk8s": embed the CA cert into the kubeconfig and
# point at the apiserver through the VIP 10.4.7.10 on port 7443.
kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
--embed-certs=true \
--server=https://10.4.7.10:7443 \
--kubeconfig=kube-proxy.kubeconfig
- 5.2 set-credentials
# Add user "kube-proxy" with the client cert/key generated above,
# embedding both into the kubeconfig so no external files are needed.
kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/server/bin/certs/kube-proxy-client.pem \
--client-key=/opt/kubernetes/server/bin/certs/kube-proxy-client-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
- 5.3 set-context
# Bind cluster "myk8s" and user "kube-proxy" together into a context.
kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
- 5.4 use-context
kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
6.拷贝配置
scp kube-proxy.kubeconfig hdss7-22:/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig
7.加载ipvs模块
在hdss7-21.host.com,hdss7-22.host.com上操作,以hdss7-21操作为例
# Show which IPVS modules are loaded before running the loader script.
lsmod |grep ip_vs
# Generate a script that loads every IPVS kernel module shipped with the
# running kernel, so kube-proxy can run in ipvs proxy mode.
cat > /root/ipvs.sh <<'eof'
#!/bin/bash
# Load all IPVS modules available for the currently running kernel.
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
# Iterate over the module files directly instead of parsing `ls` output.
for mod_file in "$ipvs_mods_dir"/*; do
  # Skip the literal glob when the directory is empty or missing.
  [ -e "$mod_file" ] || continue
  # Strip the path and everything from the first dot (.ko / .ko.xz).
  mod=${mod_file##*/}
  mod=${mod%%.*}
  # Only modprobe modules that modinfo can resolve for this kernel.
  if /sbin/modinfo -F filename "$mod" >/dev/null 2>&1; then
    /sbin/modprobe "$mod"
  fi
done
eof
chmod +x /root/ipvs.sh
sh /root/ipvs.sh
# Verify the ip_vs modules are now loaded.
lsmod |grep ip_vs
8.创建启动脚本
不同地方:--hostname-override hdss7-21.host.com
# Create the kube-proxy startup script. Per-node difference: the value of
# --hostname-override must match each node's hostname (hdss7-21 / hdss7-22).
# --cluster-cidr is the pod network; ipvs mode with the nq scheduler is used.
cat > /opt/kubernetes/server/bin/kube-proxy.sh <<'eof'
#!/bin/sh
./kube-proxy \
--cluster-cidr 172.7.0.0/16 \
--hostname-override hdss7-21.host.com \
--proxy-mode=ipvs \
--ipvs-scheduler=nq \
--kubeconfig ./conf/kube-proxy.kubeconfig
eof
chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
# Log directory referenced by the supervisor config below.
mkdir -p /data/logs/kubernetes/kube-proxy
--ipvs-scheduler=nq 采用nq算法
sed(shortest expected delay scheduling):最少期望延迟
说明:不考虑非活动连接,谁的权重大,我们优先选择权重大的服务器来接受请求,但会出现问题,就是权重比较大的服务器会很忙,但权重相对较小的服务器很闲,甚至会接收不到请求,所以便有了下面的算法nq。
nq(never queue scheduling):永不排队
在上面我们说明了,由于某台服务器的权重较小,比较空闲,甚至接收不到请求,而权重大的服务器会很忙,所以算法是sed改进,就是说不管你的权重多大都会被分配到请求。简单说,无需队列,如果有台real server的连接数为0就直接分配过去,不需要在进行sed运算。
9.创建supervisor配置
# Supervisor program definition for kube-proxy. Per-node difference: the
# section name (e.g. [program:kube-proxy-7-22] on hdss7-22).
cat > /etc/supervisord.d/kube-proxy.ini <<'eof'
[program:kube-proxy-7-21]
command=/opt/kubernetes/server/bin/kube-proxy.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; restart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
eof
不同地方:[program:kube-proxy-7-21]
10.启动服务并检查
controller-manager配置service集群IP192.168.0.0网段。
kube-proxy.kubeconfig配置访问VIP10.4.7.10,再负载均衡到10.4.7.21,10.4.7.22,把数据上传到apiserver,接着再保存到etcd。
# Reload supervisor so it picks up the new program, then verify its state.
supervisorctl update
supervisorctl status
# ipvsadm is needed to inspect the IPVS virtual-server table below.
yum install ipvsadm -y
[root@hdss7-21 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.1:443 nq
-> 10.4.7.21:6443 Masq 1 0 0
-> 10.4.7.22:6443 Masq 1 0 0
[root@hdss7-21 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 24h