前一篇文章介绍了如何yum安装简单的kubernetes集群,其中etcd是单点部署。本篇我们来搭建etcd集群,方便日后搭建kubernetes HA集群架构。
1,环境配置说明
etcd1 192.168.20.71
etcd2 192.168.20.72
etcd3 192.168.20.73
2,etcd版本
[root@master1 ~]# etcdctl --version
etcdctl version: 3.2.22
API version: 2
3,开始安装
每台etcd的安装步骤都一样,etcd2、etcd3按照etcd1的步骤安装即可
安装只需一条命令即可:
yum -y install etcd
4,查看并修改etcd配置文件
1 #修改etcd配置文件 2 [root@master1 ~]# ls /etc/etcd/ 3 etcd.conf etcd.conf.back 4 [root@master1 ~]# grep -v '^#' /etc/etcd/etcd.conf 5 ETCD_NAME=etcd1 6 7 ETCD_DATA_DIR="/var/lib/etcd/etcd1" 8 9 ETCD_LISTEN_PEER_URLS="http://192.168.20.71:2380" 10 11 ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.20.71:2379" 12 13 ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.20.71:2380" 14 15 ETCD_INITIAL_CLUSTER="etcd1=http://192.168.20.71:2380,etcd2=http://192.168.20.72:2380,etcd3=http://192.168.20.73:2380" 16 17 ETCD_INITIAL_CLUSTER_STATE="new" 18 19 ETCD_INITIAL_CLUSTER_TOKEN="etcd-test" 20 21 ETCD_ADVERTISE_CLIENT_URLS="http://192.168.20.71:2379"
5,启动etcd服务
1 #etcd启动配置文件为: 2 [root@master1 ~]# cat /usr/lib/systemd/system/etcd.service 3 [Unit] 4 Description=Etcd Server 5 After=network.target 6 After=network-online.target 7 Wants=network-online.target 8 9 [Service] 10 Type=notify 11 WorkingDirectory=/var/lib/etcd/ 12 EnvironmentFile=-/etc/etcd/etcd.conf 13 User=etcd 14 # set GOMAXPROCS to number of processors 15 ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" \ 16 --listen-peer-urls=\"${ETCD_LISTEN_PEER_URLS}\" \ 17 --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" \ 18 --advertise-client-urls=\"${ETCD_ADVERTISE_CLIENT_URLS}\" \ 19 --initial-cluster-token=\"${ETCD_INITIAL_CLUSTER_TOKEN}\" \ 20 --initial-cluster=\"${ETCD_INITIAL_CLUSTER}\" \ 21 --initial-cluster-state=\"${ETCD_INITIAL_CLUSTER_STATE}\"" 22 Restart=on-failure 23 LimitNOFILE=65536 24 25 [Install] 26 WantedBy=multi-user.target 27 28 #启动etcd服务 29 systemctl start etcd 30 #查看启动状态 31 systemctl status etcd
6,etcd2、etcd3配置文件
1 #etcd2配置文件 2 [root@master2 ~]# cat /etc/etcd/etcd.conf 3 ETCD_NAME=etcd2 4 5 ETCD_DATA_DIR="/var/lib/etcd/etcd2" 6 7 ETCD_LISTEN_PEER_URLS="http://192.168.20.72:2380" 8 9 ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.20.72:2379" 10 11 ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.20.72:2380" 12 13 ETCD_INITIAL_CLUSTER="etcd1=http://192.168.20.71:2380,etcd2=http://192.168.20.72:2380,etcd3=http://192.168.20.73:2380" 14 15 ETCD_INITIAL_CLUSTER_STATE="new" 16 17 ETCD_INITIAL_CLUSTER_TOKEN="etcd-test" 18 19 ETCD_ADVERTISE_CLIENT_URLS="http://192.168.20.72:2379" 20 21 22 #etcd3 配置文件 23 [root@master ~]# cat /etc/etcd/etcd.conf 24 ETCD_NAME=etcd3 25 26 ETCD_DATA_DIR="/var/lib/etcd/etcd3" 27 28 ETCD_LISTEN_PEER_URLS="http://192.168.20.73:2380" 29 30 ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.20.73:2379" 31 32 ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.20.73:2380" 33 34 ETCD_INITIAL_CLUSTER="etcd1=http://192.168.20.71:2380,etcd2=http://192.168.20.72:2380,etcd3=http://192.168.20.73:2380" 35 36 ETCD_INITIAL_CLUSTER_STATE="new" 37 38 ETCD_INITIAL_CLUSTER_TOKEN="etcd-test" 39 40 ETCD_ADVERTISE_CLIENT_URLS="http://192.168.20.73:2379" 41 42 #分别启动etcd2、etcd3服务 43 #查看cluster状态 44 [root@master1 ~]# etcdctl cluster-health 45 member 85b5f1a0537e385d is healthy: got healthy result from http://192.168.20.71:2379 46 member 9f304c9e0feb949d is healthy: got healthy result from http://192.168.20.72:2379 47 member ec71f609370df393 is healthy: got healthy result from http://192.168.20.73:2379 48 cluster is healthy 49 50 #列出etcd服务状态 51 [root@master1 ~]# etcdctl member list 52 85b5f1a0537e385d: name=etcd1 peerURLs=http://192.168.20.71:2380 clientURLs=http://192.168.20.71:2379 isLeader=false 53 9f304c9e0feb949d: name=etcd2 peerURLs=http://192.168.20.72:2380 clientURLs=http://192.168.20.72:2379 isLeader=false 54 ec71f609370df393: name=etcd3 peerURLs=http://192.168.20.73:2380 clientURLs=http://192.168.20.73:2379 isLeader=true 55 56 
#从列出信息可以看出,目前是etcd3为主节点。 57 #查看etcd服务启动日志,可通过 tail -f /var/log/messages 动态查看
7,至此,etcd集群已配置完成。接下来可以对kubernetes集群apiserver配置文件进行修改,使其指向etcd集群
1 #修改master节点,apiserver配置文件 2 [root@master ~]# cat /etc/kubernetes/apiserver 3 ### 4 ## kubernetes system config 5 KUBE_API_ADDRESS="--address=0.0.0.0" 6 KUBE_API_PORT="--port=8080" 7 KUBELET_PORT="--kubelet-port=10250" 8 KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.20.71:2379,http://192.168.20.72:2379,http://192.168.20.73:2379" 9 KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" 10 KUBE_API_ARGS="--service_account_key_file=/etc/kubernetes/serviceaccount.key" 11 KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" 12 #注意:不要再重复定义空的KUBE_API_ARGS="",否则会覆盖第10行中service account key的配置 13 14 #k8s集群做任何调整后,都需要重启服务 15 #重启master各组件,可连起来写 16 systemctl restart kube-apiserver kube-controller-manager kube-scheduler 17 #重启node1、node2各组件 18 systemctl restart kubelet kube-proxy 19 20 #再次在master节点查看etcd、node集群状态 21 #测试,可关闭一台etcd服务,创建一个pod,无异常 22 #通过测试可以得出,etcd集群至少需要2个etcd节点才可以正常工作。
8,如何创建pod,参见下一篇文章。