
    Kubernetes: GitLab-based CI/CD automation

    Here we walk through a hands-on, GitLab-based CI/CD deployment on Kubernetes; in production the work follows the architecture diagram from this course.

    (See also Level 13 of the k8s architect course: the private image registry, Harbor.)

    First we deploy PostgreSQL and Redis, the database and cache the private GitLab code repository needs.

    Note: if your nfs-server address and export directories are not defined the way Boge set them up in the earlier lessons, remember to replace them in the YAML below.

    Deploy postgresql

    # ------------------------------------------------
    #  mkdir -p /nfs_dir/{gitlab_etc_ver130806,gitlab_log_ver130806,gitlab_opt_ver130806,gitlab_postgresql_data_ver130806}
    #  kubectl create namespace gitlab-ver130806
    #  kubectl -n gitlab-ver130806 apply -f 3postgres.yaml
    #  kubectl -n gitlab-ver130806 apply -f 4redis.yaml
    #  kubectl -n gitlab-ver130806 apply -f 5gitlab.yaml
    #  kubectl -n gitlab-ver130806 apply -f 6gitlab-tls.yaml
    # ------------------------------------------------
    
    
    
    # pv
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: gitlab-postgresql-data-ver130806
      labels:
        type: gitlab-postgresql-data-ver130806
    spec:
      capacity:
        storage: 10Gi
      accessModes:
        - ReadWriteOnce
      persistentVolumeReclaimPolicy: Retain
      storageClassName: nfs
      nfs:
        path: /nfs_dir/gitlab_postgresql_data_ver130806
        server: 10.0.1.201
    
    # pvc
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: gitlab-postgresql-data-ver130806-pvc
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi
      storageClassName: nfs
      selector:
        matchLabels:
          type: gitlab-postgresql-data-ver130806
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: postgresql
      labels:
        app: gitlab
        tier: postgreSQL
    spec:
      ports:
        - port: 5432
      selector:
        app: gitlab
        tier: postgreSQL
    
    ---
    
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: postgresql
      labels:
        app: gitlab
        tier: postgreSQL
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: gitlab
          tier: postgreSQL
      strategy:
        type: Recreate
      template:
        metadata:
          labels:
            app: gitlab
            tier: postgreSQL
        spec:
          #nodeSelector:
          #  gee/disk: "500g"
          containers:
            - image: postgres:12.6-alpine
            #- image: harbor.boge.com/library/postgres:12.6-alpine
              name: postgresql
              env:
                - name: POSTGRES_USER
                  value: gitlab
                - name: POSTGRES_DB
                  value: gitlabhq_production
                - name: POSTGRES_PASSWORD
                  value: bogeusepg
                - name: TZ
                  value: Asia/Shanghai
              ports:
                - containerPort: 5432
                  name: postgresql
              livenessProbe:
                exec:
                  command:
                  - sh
                  - -c
                  - exec pg_isready -U gitlab -h 127.0.0.1 -p 5432 -d gitlabhq_production
                initialDelaySeconds: 110
                timeoutSeconds: 5
                failureThreshold: 6
              readinessProbe:
                exec:
                  command:
                  - sh
                  - -c
                  - exec pg_isready -U gitlab -h 127.0.0.1 -p 5432 -d gitlabhq_production
                initialDelaySeconds: 20
                timeoutSeconds: 3
                periodSeconds: 5
    #          resources:
    #            requests:
    #              cpu: 100m
    #              memory: 512Mi
    #            limits:
    #              cpu: "1"
    #              memory: 1Gi
              volumeMounts:
                - name: postgresql
                  mountPath: /var/lib/postgresql/data
          volumes:
            - name: postgresql
              persistentVolumeClaim:
                claimName: gitlab-postgresql-data-ver130806-pvc
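
    Once these manifests are applied, a quick check like the following (a minimal sketch; adjust the namespace if yours differs) confirms that PostgreSQL is up and accepting connections:

    # list the PostgreSQL pod and service created above
    kubectl -n gitlab-ver130806 get pod,svc -l app=gitlab,tier=postgreSQL
    # run pg_isready inside the pod against the gitlabhq_production database
    kubectl -n gitlab-ver130806 exec -it $(kubectl -n gitlab-ver130806 get pod|grep postgresql|awk '{print $1}') -- pg_isready -U gitlab -d gitlabhq_production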
    

    Deploy redis

    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: redis
      labels:
        app: gitlab
        tier: backend
    spec:
      ports:
        - port: 6379
          targetPort: 6379
      selector:
        app: gitlab
        tier: backend
    ---
    
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: redis
      labels:
        app: gitlab
        tier: backend
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: gitlab
          tier: backend
      strategy:
        type: Recreate
      template:
        metadata:
          labels:
            app: gitlab
            tier: backend
        spec:
          #nodeSelector:
          #  gee/disk: "500g"
          containers:
            - image: redis:6.2.0-alpine3.13
            #- image: harbor.boge.com/library/redis:6.2.0-alpine3.13
              name: redis
              command:
                - "redis-server"
              args:
                - "--requirepass"
                - "bogeuseredis"
    #          resources:
    #            requests:
    #              cpu: "1"
    #              memory: 2Gi
    #            limits:
    #              cpu: "1"
    #              memory: 2Gi
              ports:
                - containerPort: 6379
                  name: redis
              livenessProbe:
                exec:
                  command:
                  - sh
                  - -c
                  - "redis-cli ping"
                initialDelaySeconds: 30
                periodSeconds: 10
                timeoutSeconds: 5
                successThreshold: 1
                failureThreshold: 3
              readinessProbe:
                exec:
                  command:
                  - sh
                  - -c
                  - "redis-cli ping"
                initialDelaySeconds: 5
                periodSeconds: 10
                timeoutSeconds: 1
                successThreshold: 1
                failureThreshold: 3
          initContainers:
          - command:
            - /bin/sh
            - -c
            - |
              ulimit -n 65536
              mount -o remount rw /sys
              echo never > /sys/kernel/mm/transparent_hugepage/enabled
              mount -o remount rw /proc/sys
              echo 2000 > /proc/sys/net/core/somaxconn
              echo 1 > /proc/sys/vm/overcommit_memory
            image: registry.cn-beijing.aliyuncs.com/acs/busybox:v1.29.2
            imagePullPolicy: IfNotPresent
            name: init-redis
            resources: {}
            securityContext:
              privileged: true
              procMount: Default
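
    As with PostgreSQL, a quick sanity check (a sketch; the password comes from the container args above) confirms Redis is answering:

    # PING through redis-cli inside the pod, authenticating with the configured password
    kubectl -n gitlab-ver130806 exec -it $(kubectl -n gitlab-ver130806 get pod|grep redis|awk '{print $1}') -- redis-cli -a bogeuseredis ping
    # expected output: PONG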
    

    Deploy gitlab

    First, customize the image

    Dockerfile

    FROM gitlab/gitlab-ce:13.8.6-ce.0
    
    RUN rm /etc/apt/sources.list \
        && echo 'deb http://apt.postgresql.org/pub/repos/apt/ xenial-pgdg main' > /etc/apt/sources.list.d/pgdg.list \
        && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
    COPY sources.list /etc/apt/sources.list
    
    RUN apt-get update -yq && \
        apt-get install -y vim iproute2 net-tools iputils-ping curl wget software-properties-common unzip postgresql-client-12 && \
        rm -rf /var/cache/apt/archives/*
    
    RUN ln -svf /usr/bin/pg_dump /opt/gitlab/embedded/bin/pg_dump
    
    #---------------------------------------------------------------
    # docker build -t gitlab/gitlab-ce:13.8.6-ce.1 .
    

    sources.list

    deb http://mirrors.aliyun.com/ubuntu/ xenial main
    deb-src http://mirrors.aliyun.com/ubuntu/ xenial main
    deb http://mirrors.aliyun.com/ubuntu/ xenial-updates main
    deb-src http://mirrors.aliyun.com/ubuntu/ xenial-updates main
    deb http://mirrors.aliyun.com/ubuntu/ xenial universe
    deb-src http://mirrors.aliyun.com/ubuntu/ xenial universe
    deb http://mirrors.aliyun.com/ubuntu/ xenial-updates universe
    deb-src http://mirrors.aliyun.com/ubuntu/ xenial-updates universe
    deb http://mirrors.aliyun.com/ubuntu xenial-security main
    deb-src http://mirrors.aliyun.com/ubuntu xenial-security main
    deb http://mirrors.aliyun.com/ubuntu xenial-security universe
    deb-src http://mirrors.aliyun.com/ubuntu xenial-security universe
    

    Start the deployment

    # restore gitlab data command example:
    #   kubectl -n gitlab-ver130806 exec -it $(kubectl -n gitlab-ver130806 get pod|grep -v runner|grep gitlab|awk '{print $1}') -- gitlab-rake gitlab:backup:restore BACKUP=1602889879_2020_10_17_12.9.2
    #   kubectl -n gitlab-ver130806 exec -it $(kubectl -n gitlab-ver130806 get pod|grep -v runner|grep gitlab|awk '{print $1}') -- gitlab-ctl reconfigure
    #   kubectl -n gitlab-ver130806 exec -it $(kubectl -n gitlab-ver130806 get pod|grep -v runner|grep gitlab|awk '{print $1}') -- gitlab-ctl status
    
    # pv
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: gitlab-etc-ver130806
      labels:
        type: gitlab-etc-ver130806
    spec:
      capacity:
        storage: 1Gi
      accessModes:
        - ReadWriteOnce
      persistentVolumeReclaimPolicy: Retain
      storageClassName: nfs
      nfs:
        path: /nfs_dir/gitlab_etc_ver130806
        server: 10.0.1.201
    
    # pvc
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: gitlab-etc-ver130806-pvc
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
      storageClassName: nfs
      selector:
        matchLabels:
          type: gitlab-etc-ver130806
    # pv
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: gitlab-log-ver130806
      labels:
        type: gitlab-log-ver130806
    spec:
      capacity:
        storage: 1Gi
      accessModes:
        - ReadWriteOnce
      persistentVolumeReclaimPolicy: Retain
      storageClassName: nfs
      nfs:
        path: /nfs_dir/gitlab_log_ver130806
        server: 10.0.1.201
    
    # pvc
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: gitlab-log-ver130806-pvc
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
      storageClassName: nfs
      selector:
        matchLabels:
          type: gitlab-log-ver130806
          
    # pv
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: gitlab-opt-ver130806
      labels:
        type: gitlab-opt-ver130806
    spec:
      capacity:
        storage: 1Gi
      accessModes:
        - ReadWriteOnce
      persistentVolumeReclaimPolicy: Retain
      storageClassName: nfs
      nfs:
        path: /nfs_dir/gitlab_opt_ver130806
        server: 10.0.1.201
    
    # pvc
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: gitlab-opt-ver130806-pvc
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
      storageClassName: nfs
      selector:
        matchLabels:
          type: gitlab-opt-ver130806
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: gitlab
      labels:
        app: gitlab
        tier: frontend
    spec:
      ports:
        - name: gitlab-ui
          port: 80
          protocol: TCP
          targetPort: 80
        - name: gitlab-ssh
          port: 22
          protocol: TCP
          targetPort: 22
      selector:
        app: gitlab
        tier: frontend
      type: NodePort
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: gitlab
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: gitlab-cb-ver130806
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: cluster-admin
    subjects:
      - kind: ServiceAccount
        name: gitlab
        namespace: gitlab-ver130806
    ---
    
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: gitlab
      labels:
        app: gitlab
        tier: frontend
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: gitlab
          tier: frontend
      strategy:
        type: Recreate
      template:
        metadata:
          labels:
            app: gitlab
            tier: frontend
        spec:
          serviceAccountName: gitlab
          containers:
            - image: harbor.boge.com/library/gitlab-ce:13.8.6-ce.1
              name: gitlab
    #          resources:
    #            requests:
    #              cpu: 400m
    #              memory: 4Gi
    #            limits:
    #              cpu: "800m"
    #              memory: 8Gi
              securityContext:
                privileged: true
              env:
                - name: TZ
                  value: Asia/Shanghai
                - name: GITLAB_OMNIBUS_CONFIG
                  value: |
                    postgresql['enable'] = false
                    gitlab_rails['db_username'] = "gitlab"
                    gitlab_rails['db_password'] = "bogeusepg"
                    gitlab_rails['db_host'] = "postgresql"
                    gitlab_rails['db_port'] = "5432"
                    gitlab_rails['db_database'] = "gitlabhq_production"
                    gitlab_rails['db_adapter'] = 'postgresql'
                    gitlab_rails['db_encoding'] = 'utf8'
                    redis['enable'] = false
                    gitlab_rails['redis_host'] = 'redis'
                    gitlab_rails['redis_port'] = '6379'
                    gitlab_rails['redis_password'] = 'bogeuseredis'
                    gitlab_rails['gitlab_shell_ssh_port'] = 22
                    external_url 'http://git.boge.com/'
                    nginx['listen_port'] = 80
                    nginx['listen_https'] = false
                    #-------------------------------------------
                    gitlab_rails['gitlab_email_enabled'] = true
                    gitlab_rails['gitlab_email_from'] = 'admin@boge.com'
                    gitlab_rails['gitlab_email_display_name'] = 'boge'
                    gitlab_rails['gitlab_email_reply_to'] = 'gitlab@boge.com'
                    gitlab_rails['gitlab_default_can_create_group'] = true
                    gitlab_rails['gitlab_username_changing_enabled'] = true
                    gitlab_rails['smtp_enable'] = true
                    gitlab_rails['smtp_address'] = "smtp.exmail.qq.com"
                    gitlab_rails['smtp_port'] = 465
                    gitlab_rails['smtp_user_name'] = "gitlab@boge.com"
                    gitlab_rails['smtp_password'] = "bogesendmail"
                    gitlab_rails['smtp_domain'] = "exmail.qq.com"
                    gitlab_rails['smtp_authentication'] = "login"
                    gitlab_rails['smtp_enable_starttls_auto'] = true
                    gitlab_rails['smtp_tls'] = true
                    #-------------------------------------------
                    # disable prometheus
                    prometheus['enable'] = false
                    # disable grafana
                    grafana['enable'] = false
                    # reduce memory usage
                    unicorn['worker_memory_limit_min'] = "200 * 1 << 20"
                    unicorn['worker_memory_limit_max'] = "300 * 1 << 20"
                    # reduce sidekiq concurrency
                    sidekiq['concurrency'] = 16
                    # reduce the postgresql shared-buffer cache
                    postgresql['shared_buffers'] = "256MB"
                    # reduce the number of postgresql connections
                    postgresql['max_connections'] = 8
                    # reduce worker process counts (workers = CPU cores + 1)
                    unicorn['worker_processes'] = 2
                    nginx['worker_processes'] = 2
                    puma['worker_processes'] = 2
                    # puma['per_worker_max_memory_mb'] = 850
                    # keep backup files for 3 days (259200 seconds)
                    gitlab_rails['backup_keep_time'] = 259200
                    #-------------------------------------------
              ports:
                - containerPort: 80
                  name: gitlab
              livenessProbe:
                exec:
                  command:
                  - sh
                  - -c
                  - "curl -s http://127.0.0.1/-/health|grep -w 'GitLab OK'"
                initialDelaySeconds: 120
                periodSeconds: 10
                timeoutSeconds: 5
                successThreshold: 1
                failureThreshold: 3
              readinessProbe:
                exec:
                  command:
                  - sh
                  - -c
                  - "curl -s http://127.0.0.1/-/health|grep -w 'GitLab OK'"
                initialDelaySeconds: 120
                periodSeconds: 10
                timeoutSeconds: 5
                successThreshold: 1
                failureThreshold: 3
              volumeMounts:
                - mountPath: /etc/gitlab
                  name: gitlab1
                - mountPath: /var/log/gitlab
                  name: gitlab2
                - mountPath: /var/opt/gitlab
                  name: gitlab3
                - mountPath: /etc/localtime
                  name: tz-config
    
          volumes:
            - name: gitlab1
              persistentVolumeClaim:
                claimName: gitlab-etc-ver130806-pvc
            - name: gitlab2
              persistentVolumeClaim:
                claimName: gitlab-log-ver130806-pvc
            - name: gitlab3
              persistentVolumeClaim:
                claimName: gitlab-opt-ver130806-pvc
            - name: tz-config
              hostPath:
                path: /usr/share/zoneinfo/Asia/Shanghai
    
          securityContext:
            runAsUser: 0
            fsGroup: 0
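
    GitLab takes a few minutes to reconfigure itself on first start. A quick way to watch it come up and to find the NodePorts mapped to the web UI and ssh (a sketch):

    # wait until the readiness probe (curl /-/health) passes
    kubectl -n gitlab-ver130806 get pod -w
    # show the NodePorts assigned to port 80 (gitlab-ui) and port 22 (gitlab-ssh)
    kubectl -n gitlab-ver130806 get svc gitlab
    # tail the startup/reconfigure logs if it takes unusually long
    kubectl -n gitlab-ver130806 logs -f deploy/gitlab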
    

    Deploy gitlab-tls

    # old version
    
    #apiVersion: extensions/v1beta1
    #kind: Ingress
    #metadata:
    #  name: gitlab
    #  annotations:
    #    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    #    nginx.ingress.kubernetes.io/proxy-body-size: "20m"
    #spec:
    #  tls:
    #  - hosts:
    #    - git.boge.com
    #    secretName: mytls
    #  rules:
    #  - host: git.boge.com
    #    http:
    #      paths:
    #      - path: /
    #        backend:
    #          serviceName: gitlab
    #          servicePort: 80
    
    # Add tls
    # openssl genrsa -out tls.key 2048
    # openssl req -new -x509 -key tls.key -out tls.cert -days 360 -subj /CN=*.boge.com
    # kubectl -n gitlab-ver130806 create secret tls mytls --cert=tls.cert --key=tls.key 
    
    # new version
    
    ## https://kubernetes.io/docs/concepts/services-networking/ingress/
    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: gitlab
      annotations:
        nginx.ingress.kubernetes.io/force-ssl-redirect: "false"
        nginx.ingress.kubernetes.io/proxy-body-size: "20m"
    spec:
      tls:
      - hosts:
        - git.boge.com
        secretName: mytls
      rules:
      - host: git.boge.com
        http:
          paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: gitlab
                port:
                  number: 80
    
    ---
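
    After the Ingress is applied, a request through the ingress controller should return GitLab's health check. A minimal sketch (10.0.1.204 is assumed to be the node where the nginx-ingress controller is exposed; -k is needed because the certificate above is self-signed):

    curl -k -H 'Host: git.boge.com' https://10.0.1.204/-/health
    # expected output: GitLab OK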
    

    In this lesson we cover GitLab runners. GitLab's CI/CD automation works by GitLab issuing instructions that the runner component executes; here we run the runners on Kubernetes as well.

    A runner is, literally, the one who runs. In the automation pipeline it plays the role of a delivery courier: it receives the automation instructions GitLab dispatches and carries out the corresponding operations, which is what produces the end-to-end CI/CD effect.

    Deploy gitlab-runner

    docker

    #  mkdir -p /nfs_dir/{gitlab-runner1-ver130806-docker,gitlab-runner2-ver130806-share}
    
    # gitlab-ci-multi-runner register
    
    #                   Active  √ Paused Runners don't accept new jobs
    #                Protected     This runner will only run on pipelines triggered on protected branches
    #        Run untagged jobs     Indicates whether this runner can pick jobs without tags
    # Lock to current projects     When a runner is locked, it cannot be assigned to other projects
    
    # pv
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: gitlab-runner1-ver130806-docker
      labels:
        type: gitlab-runner1-ver130806-docker
    spec:
      capacity:
        storage: 0.1Gi
      accessModes:
        - ReadWriteMany
      persistentVolumeReclaimPolicy: Retain
      storageClassName: nfs
      nfs:
        path: /nfs_dir/gitlab-runner1-ver130806-docker
        server: 10.0.1.201
    
    # pvc
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: gitlab-runner1-ver130806-docker
      namespace: gitlab-ver130806
    spec:
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 0.1Gi
      storageClassName: nfs
      selector:
        matchLabels:
          type: gitlab-runner1-ver130806-docker
    
    
    ---
    # https://docs.gitlab.com/runner/executors
    
    #concurrent = 30
    #check_interval = 0
    
    #[session_server]
    #  session_timeout = 1800
    
    #[[runners]]
    #  name = "gitlab-runner1-ver130806-docker"
    #  url = "http://git.boge.com"
    #  token = "xxxxxxxxxxxxxxxxxxxxxx"
    #  executor = "kubernetes"
    #  [runners.kubernetes]
    #    namespace = "gitlab-ver130806"
    #    image = "docker:stable"
    #    helper_image = "gitlab/gitlab-runner-helper:x86_64-9fc34d48-pwsh"
    #    privileged = true
    #    [[runners.kubernetes.volumes.pvc]]
    #      name = "gitlab-runner1-ver130806-docker"
    #      mount_path = "/mnt"
    
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: gitlab-runner1-ver130806-docker
      namespace: gitlab-ver130806
    spec:
      replicas: 1
      selector:
        matchLabels:
          name: gitlab-runner1-ver130806-docker
      template:
        metadata:
          labels:
            name: gitlab-runner1-ver130806-docker
        spec:
          hostAliases:
          - ip: "10.68.140.109"
            hostnames:
            - "git.boge.com"
          serviceAccountName: gitlab
          containers:
          - args:
            - run
            image: gitlab/gitlab-runner:v13.10.0
            name: gitlab-runner1-ver130806-docker
            volumeMounts:
            - mountPath: /etc/gitlab-runner
              name: config
            - mountPath: /etc/ssl/certs
              name: cacerts
              readOnly: true
          restartPolicy: Always
          volumes:
          - persistentVolumeClaim:
              claimName: gitlab-runner1-ver130806-docker
            name: config
          - hostPath:
              path: /usr/share/ca-certificates/mozilla
            name: cacerts
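
    The commented [[runners]] block above is what registration should end up writing to config.toml. Registration itself can be run interactively inside the runner pod, for example (a sketch; the registration token comes from GitLab's Admin Area > Runners):

    # answer the prompts with URL http://git.boge.com, the token, executor "kubernetes", and a tag such as "docker"
    kubectl -n gitlab-ver130806 exec -it $(kubectl -n gitlab-ver130806 get pod|grep runner1|awk '{print $1}') -- gitlab-runner register
    # the resulting /etc/gitlab-runner/config.toml lives on the PVC, so it survives pod restarts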
    

    share

    # gitlab-ci-multi-runner register
    
    #                   Active  √ Paused Runners don't accept new jobs
    #                Protected     This runner will only run on pipelines triggered on protected branches
    #        Run untagged jobs  √ Indicates whether this runner can pick jobs without tags
    # Lock to current projects     When a runner is locked, it cannot be assigned to other projects
    
    # pv
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: gitlab-runner2-ver130806-share
      labels:
        type: gitlab-runner2-ver130806-share
    spec:
      capacity:
        storage: 0.1Gi
      accessModes:
        - ReadWriteMany
      persistentVolumeReclaimPolicy: Retain
      storageClassName: nfs
      nfs:
        path: /nfs_dir/gitlab-runner2-ver130806-share
        server: 10.0.1.201
    
    # pvc
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: gitlab-runner2-ver130806-share
      namespace: gitlab-ver130806
    spec:
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 0.1Gi
      storageClassName: nfs
      selector:
        matchLabels:
          type: gitlab-runner2-ver130806-share
    
    
    ---
    # https://docs.gitlab.com/runner/executors
    
    #concurrent = 30
    #check_interval = 0
    
    #[session_server]
    #  session_timeout = 1800
    
    #[[runners]]
    #  name = "gitlab-runner2-ver130806-share"
    #  url = "http://git.boge.com"
    #  token = "xxxxxxxxxxxxxxxx"
    #  executor = "kubernetes"
    #  [runners.kubernetes]
    #    namespace = "gitlab-ver130806"
    #    image = "registry.cn-beijing.aliyuncs.com/acs/busybox/busybox:v1.29.2"
    #    helper_image = "gitlab/gitlab-runner-helper:x86_64-9fc34d48-pwsh"
    #    privileged = false
    #    [[runners.kubernetes.volumes.pvc]]
    #      name = "gitlab-runner2-v1230-share"
    #      mount_path = "/mnt"
    
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: gitlab-runner2-ver130806-share
      namespace: gitlab-ver130806
    spec:
      replicas: 1
      selector:
        matchLabels:
          name: gitlab-runner2-ver130806-share
      template:
        metadata:
          labels:
            name: gitlab-runner2-ver130806-share
        spec:
          hostAliases:
          - ip: "10.68.140.109"
            hostnames:
            - "git.boge.com"
          serviceAccountName: gitlab
          containers:
          - args:
            - run
            image: gitlab/gitlab-runner:v13.10.0
            name: gitlab-runner2-ver130806-share
            volumeMounts:
            - mountPath: /etc/gitlab-runner
              name: config
            - mountPath: /etc/ssl/certs
              name: cacerts
              readOnly: true
          restartPolicy: Always
          volumes:
          - persistentVolumeClaim:
              claimName: gitlab-runner2-ver130806-share
            name: config
          - hostPath:
              path: /usr/share/ca-certificates/mozilla
            name: cacerts
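
    Once both runners are registered, a quick check shows they are configured and connected (a sketch):

    # list the runners each pod has registered in its config.toml
    kubectl -n gitlab-ver130806 exec -it $(kubectl -n gitlab-ver130806 get pod|grep runner1|awk '{print $1}') -- gitlab-runner list
    kubectl -n gitlab-ver130806 exec -it $(kubectl -n gitlab-ver130806 get pod|grep runner2|awk '{print $1}') -- gitlab-runner list
    # they should also show up as online under Admin Area > Runners in the GitLab UI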
    

    Add internal resolution for GitLab inside k8s

    Why do this? Boge summarizes two reasons:

    1. It optimizes GitLab's network traffic: for runners calling the GitLab service, going straight to the internal address is faster.
    2. If you are on Alibaba Cloud and run GitLab on k8s, in-cluster services such as the runners cannot reach it back through the cluster's own public SLB entry point (a limitation of Alibaba Cloud's network architecture). In that case, the configuration below solves the problem completely.
    # kubectl -n kube-system get configmaps coredns  -o yaml
    apiVersion: v1
    data:
      Corefile: |
        .:53 {
            errors
            health
            ready
            log
            rewrite stop {
              name regex git.boge.com gitlab.gitlab-ver130806.svc.cluster.local
              answer name gitlab.gitlab-ver130806.svc.cluster.local git.boge.com
            }
    
            kubernetes cluster.local in-addr.arpa ip6.arpa {
    
              pods verified
              fallthrough in-addr.arpa ip6.arpa
            }
            autopath @kubernetes
            prometheus :9153
            forward . /etc/resolv.conf
            cache 30
            loop
            reload
            loadbalance
        }
    kind: ConfigMap
    metadata:
      name: coredns
      namespace: kube-system
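
    CoreDNS picks up the edited ConfigMap on its own (the reload plugin is enabled above). The rewrite can be verified from inside the cluster with a throwaway pod (a sketch; the test image is an assumption):

    kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup git.boge.com
    # expected: git.boge.com resolves to the ClusterIP of the gitlab service in gitlab-ver130806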
    

    Add ssh port forwarding

    We want all developers to keep using the default port 22 to pull code over ssh, so the following port-forwarding configuration is needed.

    # NOTE: before adding this forward, change the node's own sshd port first, otherwise you may lock yourself out of the machine
    iptables -t nat -A PREROUTING -d 10.0.1.204 -p tcp --dport 22 -j DNAT --to-destination 10.0.1.204:31755
    
    #↑ to delete the rule created above, just replace -A with -D
    
    iptables -t nat  -nvL PREROUTING
    

    Next we pick a machine, here 10.0.1.201, add a local hosts entry "10.0.1.204 git.boge.com", and test whether pushing to the GitLab code repository works. For the detailed steps, see the video lesson of the same name for this section; please follow along and do it yourself so you understand what each of the configurations above means. Later you can apply the same idea to other services on k8s to get better access paths.
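
    A minimal sketch of that test on 10.0.1.201 (the repository path below is hypothetical):

    # add the local hosts entry, then confirm that ssh on port 22 reaches GitLab through the DNAT rule above
    echo '10.0.1.204 git.boge.com' >> /etc/hosts
    ssh -T git@git.boge.com                     # should greet you once your public key is added in GitLab
    git clone git@git.boge.com:root/demo.git    # hypothetical project; pushing to it should work the same way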

    Deploy dind (Docker in Docker)

    Hi, this is Boge. We now deploy the dind service on k8s to provide the CI (continuous integration) part of the workflow.

    Look at what docker version prints: Docker uses a client/server architecture. By default the Docker daemon does not listen on any network port; it creates a socket file (/var/run/docker.sock) for local inter-process communication. Client and daemon talk to each other over a REST API, so if we let the Docker daemon listen on a TCP port, a docker client can call the remote daemon to run image builds.
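
    A minimal illustration of that client/server split (a sketch; the address placeholder must be replaced with the dind Service IP created below):

    # point the local docker client at a remote daemon over TCP instead of /var/run/docker.sock
    export DOCKER_HOST="tcp://<dind-cluster-ip>:2375/"
    docker version                # the "Server" section now comes from the remote daemon
    docker build -t demo:test .   # the build context is sent to, and built by, the remote daemon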

    Level 15 of the k8s architect course: GitLab-based CI/CD automation, part 6

    docker in docker

    # if pip install inside dind dies with kill -9 / exit code 137 (128+9), the resource limits (cpu, memory) probably need to be raised
    
    # with only a docker client available, pointing it at dind works as usual:
    #dindSvc=$(kubectl -n kube-system get svc dind |awk 'NR==2{print $3}')
    #export DOCKER_HOST="tcp://${dindSvc}:2375/"
    #export DOCKER_DRIVER=overlay2
    #export DOCKER_TLS_CERTDIR=""
    
    
    ---
    # SVC
    kind: Service
    apiVersion: v1
    metadata:
      name: dind
      namespace: kube-system
    spec:
      selector:
        app: dind
      ports:
        - name: tcp-port
          port: 2375
          protocol: TCP
          targetPort: 2375
    
    ---
    # Deployment
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: dind
      namespace: kube-system
      labels:
        app: dind
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: dind
      template:
        metadata:
          labels:
            app: dind
        spec:
          hostNetwork: true
          containers:
          - name: dind
            #image: docker:19-dind
            image: harbor.boge.com/library/docker:19-dind
            lifecycle:
              postStart:
                exec:
                  command: ["/bin/sh", "-c", "docker login harbor.boge.com -u 'admin' -p 'boge666'"]
               # when this pod is deleted, the sleep gives kube-proxy time to flush its rules first
              preStop:
                exec:
                  command: ["/bin/sh", "-c", "sleep 5"]
            ports:
            - containerPort: 2375
    #        resources:
    #          requests:
    #            cpu: 200m
    #            memory: 256Mi
    #          limits:
    #            cpu: 0.5
    #            memory: 1Gi
            readinessProbe:
              tcpSocket:
                port: 2375
              initialDelaySeconds: 10
              periodSeconds: 30
            livenessProbe:
              tcpSocket:
                port: 2375
              initialDelaySeconds: 10
              periodSeconds: 30
            securityContext: 
                privileged: true
            env: 
              - name: DOCKER_HOST 
                value: tcp://localhost:2375
              - name: DOCKER_DRIVER 
                value: overlay2
              - name: DOCKER_TLS_CERTDIR 
                value: ''
            volumeMounts: 
              - name: docker-graph-storage
                mountPath: /var/lib/docker
              - name: tz-config
                mountPath: /etc/localtime
               # kubectl -n kube-system create secret generic harbor-ca --from-file=harbor-ca=/data/harbor/ssl/tls.cert
              - name: harbor-ca
                mountPath: /etc/docker/certs.d/harbor.boge.com/ca.crt
                subPath: harbor-ca
           # kubectl create secret docker-registry boge-secret --docker-server=harbor.boge.com --docker-username=admin --docker-password=boge666 --docker-email=admin@boge.com
          hostAliases:
          - hostnames:
            - harbor.boge.com
            ip: 10.0.1.204
          imagePullSecrets:
          - name: bogeharbor
          volumes:
    #      - emptyDir:
    #          medium: ""
    #          sizeLimit: 10Gi
          - hostPath:
              path: /var/lib/container/docker
            name: docker-graph-storage
          - hostPath:
              path: /usr/share/zoneinfo/Asia/Shanghai
            name: tz-config
          - name: harbor-ca
            secret:
              secretName: harbor-ca
              defaultMode: 0600
    #
    #        kubectl taint node 10.0.1.201 Ingress=:NoExecute
    #        kubectl describe node 10.0.1.201 |grep -i taint
    #        kubectl taint node 10.0.1.201 Ingress:NoExecute-
          nodeSelector:
            kubernetes.io/hostname: "10.0.1.201"
          tolerations:
          - operator: Exists
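
    With the dind pod running, any job in the cluster can build images through it, exactly as the commented exports at the top of this file suggest. A quick check (a sketch):

    # look up the dind Service ClusterIP and ask the remote daemon for its info
    dindSvc=$(kubectl -n kube-system get svc dind -o jsonpath='{.spec.clusterIP}')
    DOCKER_HOST="tcp://${dindSvc}:2375/" docker info
    # the output should report the overlay2 storage driver and the dind node's resources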
    

    A production CI/CD project

    Hi, this is Boge. In this lesson we start the final, end-to-end CI/CD automation workflow, the big boss at the end of the course. Since I began sharing this K8S architect course on March 1, 2021, I have kept organizing the notes and recording the videos every day. Along the way I have met many people who love K8S and received a great deal of encouragement and suggestions, all of which I treasure. Some may wonder whether anything free still exists nowadays, and whether free things end up being the most expensive; I agree to a point, but there are also plenty of people who love technology and keep sharing, just as many excellent projects have been open-sourced at home and abroad, such as the Linux operating system and the K8S system Google open-sourced. I cannot claim to be that good, but I do want to share the pitfalls and experience accumulated in my own work. If there is any self-interest, it is that I want to practice teaching and broaden my own career path, which does not conflict with what I share with you; on the contrary, the two reinforce each other. Everything I share is real production experience taken from work and then carefully organized.

    Please watch carefully and practice a lot, so that you master the whole workflow thoroughly. Here I will use Python's Flask module, a fairly common choice in companies today, to implement the complete automated workflow for a project; other languages can follow this same project to implement their own automation.

    First, containerize the k8s command-line tool kubectl for later use

    FROM harbor.boge.com/library/alpine:3.13
    
    MAINTAINER boge
    
    ENV TZ "Asia/Shanghai"
    
    RUN sed -ri 's+dl-cdn.alpinelinux.org+mirrors.aliyun.com+g' /etc/apk/repositories \
     && apk add --no-cache curl tzdata ca-certificates \
     && cp -f /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \
     && apk upgrade \
     && rm -rf /var/cache/apk/*
    
    COPY kubectl /usr/local/bin/
    RUN chmod +x /usr/local/bin/kubectl
    
    ENTRYPOINT ["kubectl"]
    CMD ["help"]
    

    Python's Flask module

    Prepare the Flask-related code files and push them to the GitLab code repository.

    app.py

    from flask import Flask
    app = Flask(__name__)
    
    @app.route('/')
    def hello_world():
        return 'Hello, boge! 21.04.11.01'
    
    @app.route('/gg/<username>')
    def hello(username):
        return 'welcome' + ': ' + username + '!'
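
    The app can be smoke-tested locally before committing (a sketch, using the same gunicorn settings the image uses):

    # install the dependencies and start the app the same way the container ENTRYPOINT does
    pip install -r requirements.txt
    gunicorn app:app -c gunicorn_config.py
    # in another shell: curl http://127.0.0.1:5000/   -> "Hello, boge! 21.04.11.01"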
    

    Dockerfile

    FROM harbor.boge.com/library/python:3.5-slim-stretch
    MAINTAINER boge
    
    WORKDIR /kae/app
    
    COPY requirements.txt .
    
    RUN  sed -i 's/deb.debian.org/ftp.cn.debian.org/g' /etc/apt/sources.list \
      && sed -i 's/security.debian.org/ftp.cn.debian.org/g' /etc/apt/sources.list \
      && apt-get update -y \
      && apt-get install -y wget gcc libsm6 libxext6 libglib2.0-0 libxrender1 make \
      && apt-get clean && apt-get autoremove -y && rm -rf /var/lib/apt/lists/*
    RUN pip install --no-cache-dir -i https://mirrors.aliyun.com/pypi/simple -r requirements.txt \
        && rm requirements.txt
    
    COPY . .
    
    EXPOSE 5000
    HEALTHCHECK CMD curl --fail http://localhost:5000 || exit 1
    
    ENTRYPOINT ["gunicorn", "app:app", "-c", "gunicorn_config.py"]
    

    gunicorn_config.py

    bind = '0.0.0.0:5000'
    graceful_timeout = 3600
    timeout = 1200
    max_requests = 1200
    workers = 1
    worker_class = 'gevent'
    

    requirements.txt

    flask
    gevent
    gunicorn
    

    Configure the following variable values in the repository's CI/CD variable settings

    Type       Key                 Value                                   State      Masked
    Variable   DOCKER_USER         admin                                   disabled   disabled
    Variable   DOCKER_PASS         boge666                                 disabled   disabled
    Variable   REGISTRY_URL        harbor.boge.com                         disabled   disabled
    Variable   REGISTRY_NS         product                                 disabled   disabled
    File       KUBE_CONFIG_TEST    contents of the k8s kubeconfig file     disabled   disabled
    

    Prepare the project automation configuration file .gitlab-ci.yml

    stages:
      - build
      - deploy
      - rollback
    
    # tag name need: 20.11.21.01
    variables:
      namecb: "flask-test"
      svcport: "5000"
      replicanum: "2"
      ingress: "flask-test.boge.com"
      certname: "mytls"
      CanarylIngressNum: "20"
    
    .deploy_k8s: &deploy_k8s |
      if [ $CANARY_CB -eq 1 ];then cp -arf .project-name-canary.yaml ${namecb}-${CI_COMMIT_TAG}.yaml; sed -ri "s+CanarylIngressNum+${CanarylIngressNum}+g" ${namecb}-${CI_COMMIT_TAG}.yaml; sed -ri "s+NomalIngressNum+$(expr 100 - ${CanarylIngressNum})+g" ${namecb}-${CI_COMMIT_TAG}.yaml ;else cp -arf .project-name.yaml ${namecb}-${CI_COMMIT_TAG}.yaml;fi
      sed -ri "s+projectnamecb.boge.com+${ingress}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
      sed -ri "s+projectnamecb+${namecb}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
      sed -ri "s+5000+${svcport}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
      sed -ri "s+replicanum+${replicanum}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
      sed -ri "s+mytls+${certname}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
      sed -ri "s+mytagcb+${CI_COMMIT_TAG}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
      sed -ri "s+harbor.boge.com/library+${IMG_URL}+g" ${namecb}-${CI_COMMIT_TAG}.yaml
      cat ${namecb}-${CI_COMMIT_TAG}.yaml
      [ -d ~/.kube ] || mkdir ~/.kube
      echo "$KUBE_CONFIG" > ~/.kube/config
      if [ $NORMAL_CB -eq 1 ];then if kubectl get deployments.|grep -w ${namecb}-canary &>/dev/null;then kubectl delete deployments.,svc ${namecb}-canary ;fi;fi
      kubectl apply -f ${namecb}-${CI_COMMIT_TAG}.yaml --record
      echo
      echo
      echo "============================================================="
      echo "                    Rollback Indx List"
      echo "============================================================="
      kubectl rollout history deployment ${namecb}|tail -5|awk -F"[ =]+" '{print $1"	"$5}'|sed '$d'|sed '$d'|sort -r|awk '{print $NF}'|awk '$0=""NR".   "$0'
    
    .rollback_k8s: &rollback_k8s |
      [ -d ~/.kube ] || mkdir ~/.kube
      echo "$KUBE_CONFIG" > ~/.kube/config
      last_version_command=$( kubectl rollout history deployment ${namecb}|tail -5|awk -F"[ =]+" '{print $1"	"$5}'|sed '$d'|sed '$d'|tail -${ROLL_NUM}|head -1 )
      last_version_num=$( echo ${last_version_command}|awk '{print $1}' )
      last_version_name=$( echo ${last_version_command}|awk '{print $2}' )
      kubectl rollout undo deployment ${namecb} --to-revision=$last_version_num
      echo $last_version_num
      echo $last_version_name
      kubectl rollout history deployment ${namecb}
    
    
    build:
      stage: build
      retry: 2
      variables:
        # use dind.yaml to deploy the dind service on k8s
        DOCKER_HOST: tcp://10.68.86.33:2375/
        DOCKER_DRIVER: overlay2
        DOCKER_TLS_CERTDIR: ""
      ##services:
        ##- docker:dind
      before_script:
        - docker login ${REGISTRY_URL} -u "$DOCKER_USER" -p "$DOCKER_PASS"
      script:
        - docker pull ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:latest || true
        - docker build --network host --cache-from ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:latest --tag ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:$CI_COMMIT_TAG --tag ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:latest .
        - docker push ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:$CI_COMMIT_TAG
        - docker push ${REGISTRY_URL}/${REGISTRY_NS}/${namecb}:latest
      after_script:
        - docker logout ${REGISTRY_URL}
      tags:
        - "docker"
      only:
        - tags
    
    
    
    
    
    #--------------------------K8S DEPLOY--------------------------------------------------
    
    BOGE-deploy:
      stage: deploy
      image: harbor.boge.com/library/kubectl:v1.19.9
      variables:
        KUBE_CONFIG: "$KUBE_CONFIG_TEST"
        IMG_URL: "${REGISTRY_URL}/${REGISTRY_NS}"
        NORMAL_CB: 1
      script:
        - *deploy_k8s
      when: manual
      only:
        - tags
    
    # canary start
    BOGE-canary-deploy:
      stage: deploy
      image: harbor.boge.com/library/kubectl:v1.19.9
      variables:
        KUBE_CONFIG: "$KUBE_CONFIG_TEST"
        IMG_URL: "${REGISTRY_URL}/${REGISTRY_NS}"
        CANARY_CB: 1
      script:
        - *deploy_k8s
      when: manual
      only:
        - tags
    # canary end
    
    BOGE-rollback-1:
      stage: rollback
      image: harbor.boge.com/library/kubectl:v1.19.9
      variables:
        KUBE_CONFIG: "$KUBE_CONFIG_TEST"
        ROLL_NUM: 1
      script:
        - *rollback_k8s
      when: manual
      only:
        - tags
    
    
    BOGE-rollback-2:
      stage: rollback
      image: harbor.boge.com/library/kubectl:v1.19.9
      variables:
        KUBE_CONFIG: "$KUBE_CONFIG_TEST"
        ROLL_NUM: 2
      script:
        - *rollback_k8s
      when: manual
      only:
        - tags
    
    
    BOGE-rollback-3:
      stage: rollback
      image: harbor.boge.com/library/kubectl:v1.19.9
      variables:
        KUBE_CONFIG: "$KUBE_CONFIG_TEST"
        ROLL_NUM: 3
      script:
        - *rollback_k8s
      when: manual
      only:
        - tags
    

    Prepare the k8s Deployment template file .project-name.yaml

    Note: create the Harbor pull-credential secret in K8S ahead of time, with the following command:

    kubectl -n test create secret docker-registry boge-secret --docker-server=harbor.boge.com --docker-username=admin --docker-password=boge666 --docker-email=admin@boge.com

    ---
    # SVC
    kind: Service
    apiVersion: v1
    metadata:
      labels:
        kae: "true"
        kae-app-name: projectnamecb
        kae-type: app
      name: projectnamecb
    spec:
      selector:
        kae: "true"
        kae-app-name: projectnamecb
        kae-type: app
      ports:
        - name: http-port
          port: 80
          protocol: TCP
          targetPort: 5000
    #      nodePort: 12345
    #  type: NodePort
    
    ---
    # Ingress
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      labels:
        kae: "true"
        kae-app-name: projectnamecb
        kae-type: app
      name: projectnamecb
    spec:
      tls:
      - hosts:
        - projectnamecb.boge.com
        secretName: mytls
      rules:
      - host: projectnamecb.boge.com
        http:
          paths:
          - path: /
            backend:
              serviceName: projectnamecb
              servicePort: 80
    
    ---
    # Deployment
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: projectnamecb
      labels:
        kae: "true"
        kae-app-name: projectnamecb
        kae-type: app
    spec:
      replicas: replicanum
      selector:
        matchLabels:
          kae-app-name: projectnamecb
      template:
        metadata:
          labels:
            kae: "true"
            kae-app-name: projectnamecb
            kae-type: app
        spec:
          containers:
          - name: projectnamecb
            image: harbor.boge.com/library/projectnamecb:mytagcb
            env:
              - name: TZ
                value: Asia/Shanghai
            ports:
            - containerPort: 5000
            readinessProbe:
              httpGet:
                scheme: HTTP
                path: /
                port: 5000
              initialDelaySeconds: 10
              periodSeconds: 5
              timeoutSeconds: 3
              successThreshold: 1
              failureThreshold: 3
            livenessProbe:
              httpGet:
                scheme: HTTP
                path: /
                port: 5000
              initialDelaySeconds: 10
              periodSeconds: 5
              timeoutSeconds: 3
              successThreshold: 1
              failureThreshold: 3
            resources:
              requests:
                cpu: 0.3
                memory: 0.5Gi
              limits:
                cpu: 0.3
                memory: 0.5Gi
          imagePullSecrets:
          - name: boge-secret
    

    Prepare the canary deployment template file for K8S, .project-name-canary.yaml

    ---
    # SVC
    kind: Service
    apiVersion: v1
    metadata:
      labels:
        kae: "true"
        kae-app-name: projectnamecb-canary
        kae-type: app
      name: projectnamecb-canary
    spec:
      selector:
        kae: "true"
        kae-app-name: projectnamecb-canary
        kae-type: app
      ports:
        - name: http-port
          port: 80
          protocol: TCP
          targetPort: 5000
    #      nodePort: 12345
    #  type: NodePort
    
    ---
    # Ingress
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      labels:
        kae: "true"
        kae-app-name: projectnamecb-canary
        kae-type: app
      name: projectnamecb
      annotations:
        nginx.ingress.kubernetes.io/service-weight: |
            projectnamecb: NomalIngressNum, projectnamecb-canary: CanarylIngressNum
    spec:
      tls:
      - hosts:
        - projectnamecb.boge.com
        secretName: mytls
      rules:
      - host: projectnamecb.boge.com
        http:
          paths:
          - path: /
            backend:
              serviceName: projectnamecb
              servicePort: 80
          - path: /
            backend:
              serviceName: projectnamecb-canary
              servicePort: 80
    
    ---
    # Deployment
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: projectnamecb-canary
      labels:
        kae: "true"
        kae-app-name: projectnamecb-canary
        kae-type: app
    spec:
      replicas: replicanum
      selector:
        matchLabels:
          kae-app-name: projectnamecb-canary
      template:
        metadata:
          labels:
            kae: "true"
            kae-app-name: projectnamecb-canary
            kae-type: app
        spec:
          containers:
          - name: projectnamecb-canary
            image: harbor.boge.com/library/projectnamecb:mytagcb
            env:
              - name: TZ
                value: Asia/Shanghai
            ports:
            - containerPort: 5000
            readinessProbe:
              httpGet:
                scheme: HTTP
                path: /
                port: 5000
              initialDelaySeconds: 10
              periodSeconds: 5
              timeoutSeconds: 3
              successThreshold: 1
              failureThreshold: 3
            livenessProbe:
              httpGet:
                scheme: HTTP
                path: /
                port: 5000
              initialDelaySeconds: 10
              periodSeconds: 5
              timeoutSeconds: 3
              successThreshold: 1
              failureThreshold: 3
            resources:
              requests:
                cpu: 0.3
                memory: 0.5Gi
              limits:
                cpu: 0.3
                memory: 0.5Gi
          imagePullSecrets:
          - name: boge-secret
    

    Finally, after you modify the code and push a version-number tag, the CI/CD pipeline is triggered automatically; for the detailed walkthrough, see the video tutorial of the same name recorded by Boge. A sketch of the tag-push trigger follows.
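
    A sketch of that trigger, following the tag-name convention noted in .gitlab-ci.yml (for example 21.04.11.01):

    # commit the change, tag it, and push the tag; only tag pushes run the pipeline (only: tags)
    git add .
    git commit -m "update app"
    git tag 21.04.11.01
    git push origin --tags
    # then run the manual BOGE-deploy / BOGE-canary-deploy / BOGE-rollback jobs from the pipeline page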
