  • Kubernetes cluster deployment (yum-based)

    Environment preparation

    Kubernetes-Master:192.168.37.134    #yum install kubernetes-master etcd flannel -y

    Kubernetes-node1:192.168.37.135     #yum install kubernetes-node etcd docker flannel *rhsm* -y

    Kubernetes-node2:192.168.37.136     #yum install kubernetes-node etcd docker flannel *rhsm* -y

    OS version: CentOS 7.5

    Disable the firewalld firewall on all nodes and make sure NTP time synchronization is working.
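    As a reference, a minimal way to satisfy these two requirements on CentOS 7 might look like the following (run on all three machines; the NTP server name here is only an example):

    # systemctl stop firewalld && systemctl disable firewalld
    # yum install ntpdate -y
    # ntpdate ntp1.aliyun.com     #any reachable NTP server will do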

    【K8s-master etcd configuration】

    [root@Kubernetes-master ~]# egrep -v "#|^$" /etc/etcd/etcd.conf
    ETCD_DATA_DIR="/data/etcd1"
    ETCD_LISTEN_PEER_URLS="http://192.168.37.134:2380"
    ETCD_LISTEN_CLIENT_URLS="http://192.168.37.134:2379,http://127.0.0.1:2379"
    ETCD_MAX_SNAPSHOTS="5"
    ETCD_NAME="etcd1"
    ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.37.134:2380"
    ETCD_ADVERTISE_CLIENT_URLS="http://192.168.37.134:2379"
    ETCD_INITIAL_CLUSTER="etcd1=http://192.168.37.134:2380,etcd2=http://192.168.37.135:2380,etcd3=http://192.168.37.136:2380"

    Configuration file explained:

    ETCD_DATA_DIR: path of the etcd data directory on this node

    ETCD_NAME: name of this etcd member (etcd1/etcd2/etcd3)

    ETCD_LISTEN_PEER_URLS: the address this node listens on for traffic from the other etcd members

    ETCD_LISTEN_CLIENT_URLS: the list of addresses this etcd node listens on for client traffic

    ETCD_INITIAL_ADVERTISE_PEER_URLS: the peer address and port (2380) this node advertises to the rest of the cluster

    ETCD_ADVERTISE_CLIENT_URLS: the client address (port 2379) this node advertises, so the other etcd members and clients know how to reach it

    ETCD_INITIAL_CLUSTER: all members of the etcd cluster with their peer addresses on port 2380, used to bootstrap the cluster and synchronize data between members

    [root@Kubernetes-master ~]# mkdir -p /data/etcd1/

    [root@Kubernetes-master ~]# chmod 757 -R /data/etcd1/

    【K8s-node1 etcd configuration】

    [root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/etcd/etcd.conf 
    ETCD_DATA_DIR="/data/etcd2"
    ETCD_LISTEN_PEER_URLS="http://192.168.37.135:2380"
    ETCD_LISTEN_CLIENT_URLS="http://192.168.37.135:2379,http://127.0.0.1:2379"
    ETCD_MAX_SNAPSHOTS="5"
    ETCD_NAME="etcd2"
    ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.37.135:2380"
    ETCD_ADVERTISE_CLIENT_URLS="http://192.168.37.135:2379"
    ETCD_INITIAL_CLUSTER="etcd1=http://192.168.37.134:2380,etcd2=http://192.168.37.135:2380,etcd3=http://192.168.37.136:2380"

    [root@kubernetes-node1 ~]# mkdir -p /data/etcd2/

    [root@kubernetes-node1 ~]# chmod 757 -R /data/etcd2/

    【K8s-node2 etcd configuration】

    [root@kubernetes-node2 ~]# egrep -v "#|^$" /etc/etcd/etcd.conf  
    ETCD_DATA_DIR="/data/etcd3"
    ETCD_LISTEN_PEER_URLS="http://192.168.37.136:2380"
    ETCD_LISTEN_CLIENT_URLS="http://192.168.37.136:2379,http://127.0.0.1:2379"
    ETCD_MAX_SNAPSHOTS="5"
    ETCD_NAME="etcd3"
    ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.37.136:2380"
    ETCD_ADVERTISE_CLIENT_URLS="http://192.168.37.136:2379"
    ETCD_INITIAL_CLUSTER="etcd1=http://192.168.37.134:2380,etcd2=http://192.168.37.135:2380,etcd3=http://192.168.37.136:2380"

    [root@kubernetes-node2 ~]# mkdir /data/etcd3/

    [root@kubernetes-node2 ~]# chmod 757 -R /data/etcd3/

    At this point the etcd cluster configuration is complete; next, start etcd and verify that the cluster is healthy.

    [root@Kubernetes-master ~]# systemctl start etcd.service     #Note: the etcd service must be started, and enabled at boot, on all of the nodes above
    [root@Kubernetes-master ~]# systemctl enable etcd.service
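    With all three members started, the cluster state can be checked from any node. A quick sanity check with the etcd v2 etcdctl shipped by this yum install might be:

    [root@Kubernetes-master ~]# etcdctl cluster-health     #every member should report "healthy"
    [root@Kubernetes-master ~]# etcdctl member list        #all three members should be listed, exactly one as leader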

    【K8s-master node apiserver and config configuration】

    [root@Kubernetes-master ~]# egrep -v "#|^$" /etc/kubernetes/apiserver

    KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
    KUBE_API_PORT="--port=8080"
    KUBELET_PORT="--kubelet-port=10250"
    KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.37.134:2379,http://192.168.37.135:2379,http://192.168.37.136:2379"
    KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
    KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
    KUBE_API_ARGS=""

     

    [root@Kubernetes-master ~]# systemctl start kube-apiserver
    [root@Kubernetes-master ~]# systemctl enable kube-apiserver
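    To confirm that the API server is answering on the insecure port, a simple check (any node can run it) is:

    [root@Kubernetes-master ~]# curl http://192.168.37.134:8080/healthz     #should return "ok"
    [root@Kubernetes-master ~]# curl http://192.168.37.134:8080/version     #returns the apiserver version as JSON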

    [root@Kubernetes-master ~]# egrep -v "#|^$" /etc/kubernetes/config 
    KUBE_LOGTOSTDERR="--logtostderr=true"
    KUBE_LOG_LEVEL="--v=0"
    KUBE_ALLOW_PRIV="--allow-privileged=false"
    KUBE_MASTER="--master=http://192.168.37.134:8080"

    [root@Kubernetes-master kubernetes]# systemctl start kube-controller-manager
    [root@Kubernetes-master kubernetes]# systemctl enable kube-controller-manager
    [root@Kubernetes-master kubernetes]# systemctl start kube-scheduler
    [root@Kubernetes-master kubernetes]# systemctl enable kube-scheduler
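    Once kube-controller-manager and kube-scheduler are up, their health can be checked from the master with kubectl (a quick sketch; the exact output format depends on the version):

    [root@Kubernetes-master ~]# kubectl -s http://192.168.37.134:8080 get componentstatuses     #scheduler, controller-manager and the etcd members should all show "Healthy"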

    【k8s-node1】

    kubelet configuration file

    [root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/kubernetes/kubelet 
    KUBELET_ADDRESS="--address=0.0.0.0"
    KUBELET_HOSTNAME="--hostname-override=192.168.37.135"
    KUBELET_API_SERVER="--api-servers=http://192.168.37.134:8080"
    KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
    KUBELET_ARGS=""

     

    config main configuration file

    [root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/kubernetes/config 
    KUBE_LOGTOSTDERR="--logtostderr=true"
    KUBE_LOG_LEVEL="--v=0"
    KUBE_ALLOW_PRIV="--allow-privileged=false"
    KUBE_MASTER="--master=http://192.168.37.134:8080"

    [root@kubernetes-node1 ~]# systemctl start kubelet
    [root@kubernetes-node1 ~]# systemctl enable kubelet
    [root@kubernetes-node1 ~]# systemctl start kube-proxy
    [root@kubernetes-node1 ~]# systemctl enable kube-proxy

    【k8s-node2】

    kubelet configuration file

    [root@kubernetes-node2 ~]# egrep -v "#|^$" /etc/kubernetes/kubelet 
    KUBELET_ADDRESS="--address=0.0.0.0"
    KUBELET_HOSTNAME="--hostname-override=192.168.37.136"
    KUBELET_API_SERVER="--api-servers=http://192.168.37.134:8080"
    KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
    KUBELET_ARGS=""

    config main configuration file

    [root@kubernetes-node2 ~]# egrep -v "^$|#" /etc/kubernetes/config 
    KUBE_LOGTOSTDERR="--logtostderr=true"
    KUBE_LOG_LEVEL="--v=0"
    KUBE_ALLOW_PRIV="--allow-privileged=false"
    KUBE_MASTER="--master=http://192.168.37.134:8080"

    [root@kubernetes-node2 ~]# systemctl start kubelet
    [root@kubernetes-node2 ~]# systemctl enable kubelet
    [root@kubernetes-node2 ~]# systemctl start kube-proxy
    [root@kubernetes-node2 ~]# systemctl enable kube-proxy

    【Kubernetes flanneld network configuration】

     [root@Kubernetes-master kubernetes]# egrep -v "#|^$" /etc/sysconfig/flanneld
    FLANNEL_ETCD_ENDPOINTS="http://192.168.37.134:2379"
    FLANNEL_ETCD_PREFIX="/atomic.io/network"

    [root@kubernetes-node1 ~]# egrep -v "#|^$" /etc/sysconfig/flanneld 
    FLANNEL_ETCD_ENDPOINTS="http://192.168.37.134:2379"
    FLANNEL_ETCD_PREFIX="/atomic.io/network"

     [root@kubernetes-node2 ~]# egrep -v "#|^$" /etc/sysconfig/flanneld
     FLANNEL_ETCD_ENDPOINTS="http://192.168.37.134:2379"
     FLANNEL_ETCD_PREFIX="/atomic.io/network"

    [root@Kubernetes-master kubernetes]# etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16"}'
    {"Network":"172.17.0.0/16"}
    [root@Kubernetes-master kubernetes]# etcdctl get /atomic.io/network/config
    {"Network":"172.17.0.0/16"}

    [root@Kubernetes-master kubernetes]# systemctl restart flanneld
    [root@Kubernetes-master kubernetes]# systemctl enable flanneld

    [root@kubernetes-node1 ~]# systemctl start flanneld
    [root@kubernetes-node1 ~]# systemctl enable flanneld

    [root@kubernetes-node2 ~]# systemctl start flanneld
    [root@kubernetes-node2 ~]# systemctl enable flanneld

    Ps: after restarting flanneld, a subnet shows up for each of the three nodes. On each node, make sure the docker bridge uses the same subnet as that node's flannel allocation; if they differ, restarting the docker service fixes it, otherwise the three subnets will not be able to ping each other.

    [root@Kubernetes-master ~]# etcdctl ls /atomic.io/network/subnets
    /atomic.io/network/subnets/172.17.2.0-24
    /atomic.io/network/subnets/172.17.23.0-24
    /atomic.io/network/subnets/172.17.58.0-24
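    To confirm on a node that docker actually picked up the flannel subnet, compare the subnet written out by flanneld with the address on docker0 (a sketch, assuming the standard flannel/docker integration shipped with these packages):

    [root@kubernetes-node1 ~]# cat /run/flannel/subnet.env     #FLANNEL_SUBNET assigned to this node
    [root@kubernetes-node1 ~]# ip -4 addr show docker0         #docker0 should sit inside FLANNEL_SUBNET
    [root@kubernetes-node1 ~]# systemctl restart docker        #only needed if the two subnets do not match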

    Check the firewall settings on the Kubernetes node machines: if the FORWARD chain policy is DROP, set it to ACCEPT with iptables -P FORWARD ACCEPT.

    [root@kubernetes-node1 ~]# iptables -L -n   #list the current firewall rules
    Chain INPUT (policy ACCEPT)
    target     prot opt source               destination         
    KUBE-FIREWALL  all  --  0.0.0.0/0            0.0.0.0/0           
    
    Chain FORWARD (policy ACCEPT)
    target     prot opt source               destination         
    DOCKER-ISOLATION  all  --  0.0.0.0/0            0.0.0.0/0           
    DOCKER     all  --  0.0.0.0/0            0.0.0.0/0           
    ACCEPT     all  --  0.0.0.0/0            0.0.0.0/0            ctstate RELATED,ESTABLISHED
    ACCEPT     all  --  0.0.0.0/0            0.0.0.0/0           
    ACCEPT     all  --  0.0.0.0/0            0.0.0.0/0           
    
    Chain OUTPUT (policy ACCEPT)
    target     prot opt source               destination         
    KUBE-SERVICES  all  --  0.0.0.0/0            0.0.0.0/0            /* kubernetes service portals */
    KUBE-FIREWALL  all  --  0.0.0.0/0            0.0.0.0/0           
    
    Chain DOCKER (1 references)
    target     prot opt source               destination         
    
    Chain DOCKER-ISOLATION (1 references)
    target     prot opt source               destination         
    RETURN     all  --  0.0.0.0/0            0.0.0.0/0           
    
    Chain KUBE-FIREWALL (2 references)
    target     prot opt source               destination         
    DROP       all  --  0.0.0.0/0            0.0.0.0/0            /* kubernetes firewall for dropping marked packets */ mark match 0x8000/0x8000
    
    Chain KUBE-SERVICES (1 references)
    target     prot opt source               destination  

    Alternatively, enable IP forwarding:

    echo "net.ipv4.ip_forward = 1" >>/usr/lib/sysctl.d/50-default.conf
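    For example, to apply both settings on a node immediately (the echo above only takes effect after the sysctl configuration is reloaded or the machine reboots):

    [root@kubernetes-node1 ~]# iptables -P FORWARD ACCEPT        #set the FORWARD chain policy to ACCEPT
    [root@kubernetes-node1 ~]# sysctl -w net.ipv4.ip_forward=1   #enable kernel IP forwarding right away
    [root@kubernetes-node1 ~]# sysctl --system                   #reload all sysctl config files, including 50-default.conf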

    [root@Kubernetes-master ~]# etcdctl ls /atomic.io/network/subnets  #check the subnet assignments and confirm connectivity is normal
    /atomic.io/network/subnets/172.17.38.0-24
    /atomic.io/network/subnets/172.17.89.0-24
    /atomic.io/network/subnets/172.17.52.0-24

    [root@Kubernetes-master ~]# kubectl get nodes    #check the Kubernetes node status from the master
    NAME             STATUS    AGE
    192.168.37.135   Ready     5m
    192.168.37.136   Ready     5m

    [root@Kubernetes-master ~]# etcdctl member list    #check the etcd cluster member status
    328468069ff33f93: name=etcd1 peerURLs=http://192.168.37.134:2380 clientURLs=http://192.168.37.134:2379 isLeader=true
    c2f8384c4776d3e7: name=etcd3 peerURLs=http://192.168.37.136:2380 clientURLs=http://192.168.37.136:2379 isLeader=false
    d6ef60212aca5419: name=etcd2 peerURLs=http://192.168.37.135:2380 clientURLs=http://192.168.37.135:2379 isLeader=false
    [root@Kubernetes-master ~]# kubectl get nodes    #check the k8s cluster node status again
    NAME             STATUS    AGE
    192.168.37.135   Ready     4h
    192.168.37.136   Ready     1h

    【K8s Dashboard UI platform deployment】

    Kubernetes provides unified management and scheduling of the docker container cluster; the web UI makes it easier to manage and monitor.

    Ps: here we only need to import the images on the node1 node.

    [root@kubernetes-node1 ~]# docker load < pod-infrastructure.tgz 
    [root@kubernetes-node1 ~]# docker tag $(docker images | grep none | awk '{print $3}') registry.access.redhat.com/rhel7/pod-infrastructure
    [root@kubernetes-node1 ~]# docker images
    REPOSITORY                                            TAG                 IMAGE ID            CREATED             SIZE
    registry.access.redhat.com/rhel7/pod-infrastructure   latest              99965fb98423        18 months ago       209 MB
    [root@kubernetes-node1 ~]# docker load < kubernetes-dashboard-amd64.tgz
    [root@kubernetes-node1 ~]# docker tag $(docker images | grep none | awk '{print $3}') bestwu/kubernetes-dashboard-amd64:v1.6.3
    [root@kubernetes-node1 ~]# docker images
    REPOSITORY                                            TAG                 IMAGE ID            CREATED             SIZE
    registry.access.redhat.com/rhel7/pod-infrastructure   latest              99965fb98423        18 months ago       209 MB
    bestwu/kubernetes-dashboard-amd64                     v1.6.3              9595afede088        21 months ago       139 MB

    【Kubernetes-master】

    Edit the YAML files and create the Dashboard pods.

     [root@Kubernetes-master ~]# vim dashboard-controller.yaml

    [root@Kubernetes-master ~]# cat dashboard-controller.yaml 
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: kubernetes-dashboard
      namespace: kube-system
      labels:
        k8s-app: kubernetes-dashboard
        kubernetes.io/cluster-service: "true"
    spec:
      selector:
        matchLabels:
          k8s-app: kubernetes-dashboard
      template:
        metadata:
          labels:
            k8s-app: kubernetes-dashboard
          annotations:
            scheduler.alpha.kubernetes.io/critical-pod: ''
            scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
        spec:
          containers:
          - name: kubernetes-dashboard
            image: bestwu/kubernetes-dashboard-amd64:v1.6.3
            resources:
              # keep request = limit to keep this container in guaranteed class
              limits:
                cpu: 100m
                memory: 50Mi
              requests:
                cpu: 100m
                memory: 50Mi
            ports:
            - containerPort: 9090
            args:
              - --apiserver-host=http://192.168.37.134:8080
            livenessProbe:
              httpGet:
                path: /
                port: 9090
              initialDelaySeconds: 30
              timeoutSeconds: 30

     [root@Kubernetes-master ~]# vim dashboard-service.yaml

    apiVersion: v1
    kind: Service
    metadata:
      name: kubernetes-dashboard
      namespace: kube-system
      labels:
        k8s-app: kubernetes-dashboard
        kubernetes.io/cluster-service: "true"
    spec:
      selector:
        k8s-app: kubernetes-dashboard
      ports:
      - port: 80
        targetPort: 9090

    [root@Kubernetes-master ~]# kubectl apply -f dashboard-controller.yaml

    [root@Kubernetes-master ~]# kubectl apply -f dashboard-service.yaml 

    Ps: while the resources are being created, check the logs for any error messages.

    [root@Kubernetes-master ~]# tail -f /var/log/messages
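    The dashboard pod and service can also be checked directly from the master (pod names will differ in your cluster):

    [root@Kubernetes-master ~]# kubectl get pods --namespace=kube-system -o wide     #the kubernetes-dashboard pod should be Running
    [root@Kubernetes-master ~]# kubectl get svc --namespace=kube-system              #the kubernetes-dashboard service should expose port 80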

    On the node1 node you can see that the containers have started successfully.

    [root@kubernetes-node1 ~]# docker ps 
    CONTAINER ID        IMAGE                                                        COMMAND                  CREATED             STATUS              PORTS        
    f118f845f19f        bestwu/kubernetes-dashboard-amd64:v1.6.3                     "/dashboard --inse..."   8 minutes ago       Up 8 minutes                     30dc9e7f_kubernetes-dashboard-1315149111-pfb60_kube-system_19dcb04b-6d6e-11e9-9599-000c291881f6_02fd5b8e
    67b7746a6d23        registry.access.redhat.com/rhel7/pod-infrastructure:latest   "/usr/bin/pod"           8 minutes ago       Up 8 minutes                     es-dashboard-1315149111-pfb60_kube-system_19dcb04b-6d6e-11e9-9599-000c291881f6_4e2cb565

    You can then verify the deployment in a browser by accessing the dashboard through the k8s-master.
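    With this dashboard version the simplest entry point is the apiserver proxy URL, for example (assuming the insecure port configured above; the exact proxy path can vary between Kubernetes versions):

    http://192.168.37.134:8080/ui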

     

    As a simple test, deploy an nginx container and expose it for external access.

    Create a Service for external access: by default it gets a random cluster IP, and port 80 is mapped to port 80 of the backend pod containers. Inside the LAN, the application can be reached at the cluster IP on port 80; for external access, use a node IP plus the randomly generated NodePort, as sketched below.
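    A rough sketch of this flow with kubectl (the deployment name, image and replica count here are illustrative, not taken from the original setup):

    [root@Kubernetes-master ~]# kubectl run nginx --image=nginx --replicas=2 --port=80     #creates an nginx deployment
    [root@Kubernetes-master ~]# kubectl expose deployment nginx --port=80 --target-port=80 --type=NodePort
    [root@Kubernetes-master ~]# kubectl get svc nginx      #note the cluster IP and the random NodePort (3xxxx)
    [root@Kubernetes-master ~]# kubectl get pods -o wide   #shows which node each nginx pod landed on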

     

    In a browser, the nginx container created by k8s can be reached at a node IP plus the randomly mapped port, for example:

    http://192.168.37.136:31090/

    【Extension: local private registry deployment】

    # docker run -itd -p 5000:5000 -v /data/registry:/var/registry docker.io/registry

    # docker tag docker.io/tomcat 192.168.37.135:5000/tomcat

    # vim  /etc/sysconfig/docker

    OPTIONS='--selinux-enabled --log-driver=journald --signature-verification=false --insecure-registry 192.168.37.135:5000'
    ADD_REGISTRY='--add-registry 192.168.37.135:5000'

    # systemctl restart docker.service

    # docker push 192.168.37.135:5000/tomcat
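    After the push, any node configured with the same --insecure-registry option should be able to pull and run the image; for example (assuming the registry image speaks the v2 API):

    # curl http://192.168.37.135:5000/v2/_catalog     #list the repositories stored in the private registry
    # docker pull 192.168.37.135:5000/tomcat          #pull the image from the private registry on another node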
