  • Docker: building images with a Dockerfile

    Docker components

    1. docker client: the Docker command-line client.

    2. docker server: the main component of the docker daemon; it accepts requests sent by users through the docker client and routes them in real time according to the corresponding routing rules.

    3. docker image: an image becomes a container when it is run (docker run); containers start quickly and images use a layered storage model.

    4. docker registry: the registry is the central repository where Docker images are stored (pull/push).
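
    The client/daemon split is easy to see from the command line: docker version prints a Client section and a Server (daemon) section, and docker info reports daemon-level details such as the storage driver. The exact output depends on the installed version.

    [root@docker1 ~]# docker version     # client and daemon versions / API versions
    [root@docker1 ~]# docker info        # storage driver, image/container counts, registry settings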

    Install the latest Docker with yum

    [root@docker1 yum.repos.d]# cat docker.repo 
    [dockerrepo]
    name=Docker Repository
    baseurl=https://yum.dockerproject.org/repo/main/centos/7
    enabled=1
    gpgcheck=1
    gpgkey=https://yum.dockerproject.org/gpg

    Install Docker

    [root@docker1 ~]# yum -y install docker-engine
    
    Start Docker
    [root@docker1 ~]# systemctl start docker.service
    [root@docker1 ~]# systemctl enable docker.service

    Build a Docker image from a Dockerfile, using a base image downloaded from docker.io.

    Search for the centos image
    [root@docker1 ~]#docker search  centos
    
    Pull the centos image
    [root@docker1 ~]#docker pull centos
    
    List the downloaded images
    [root@docker1 ~]# docker images
    REPOSITORY    TAG    IMAGE ID            CREATED             SIZE
    
    centos          latest   67591570dd29      7 weeks ago         192 MB

      

    [root@docker1 ~]# git clone https://git.oschina.net/dockerf/docker-training.git
    [root@docker1 ~]# ls
    docker-training

    [root@docker1 ~]# cd docker-training
    [root@docker1 docker-training]# ls

    Four directories: centos7, mysql, php-fpm, wordpress.
    We will build Docker images for centos7, php-fpm, mysql, and wordpress.

    [root@docker1 centos7]# ls
    1.repo Centos-7.repo Dockerfile supervisord.conf

    A Dockerfile is a configuration file that automates building a Docker image.

    [root@docker1 centos7]# cat Dockerfile 
    # A base image is required; centos7.1.1503 would be pulled from Docker Hub
    #FROM       centos:centos7.1.1503
    FROM       centos:latest

    # Maintainer
    MAINTAINER fengjian <fengjian@senyint.com>

    # Set a timezone environment variable
    ENV TZ "Asia/Shanghai"

    # Virtual terminal
    ENV TERM xterm

    # A Dockerfile has two instructions that copy files: 1. COPY  2. ADD.
    # ADD does two things COPY cannot: the source may be a URL, and if the source
    # is a compressed archive, ADD extracts it automatically into the container.
    ADD Centos-7.repo /etc/yum.repos.d/CentOS-Base.repo
    ADD 1.repo /etc/yum.repos.d/epel.repo

    RUN yum install -y curl wget tar bzip2 libtool-ltdl-devel unzip vim-enhanced passwd sudo yum-utils hostname net-tools rsync man && \
        yum install -y gcc gcc-c++ git make automake cmake patch logrotate python-devel libpng-devel libjpeg-devel && \
        yum install -y python-pip
    #RUN pip install --upgrade pip

    # supervisor is a process manager; it is not needed if the container runs a single process
    RUN pip install -i https://pypi.tuna.tsinghua.edu.cn/simple supervisor
    ADD supervisord.conf /etc/supervisord.conf

    # /etc/supervisor.conf.d holds the config files of the processes to be started
    RUN mkdir -p /etc/supervisor.conf.d && mkdir -p /var/log/supervisor

    # The container exposes port 22 to the host
    EXPOSE 22

    # Only the last ENTRYPOINT takes effect
    ENTRYPOINT ["/usr/bin/supervisord", "-n", "-c", "/etc/supervisord.conf"]
    [root@docker1 centos7]# cat 1.repo 
    [bash]
    name=centos7
    baseurl=http://192.168.20.220/centos7/Packages/
    enabled=1
    gpgcheck=0
    

      

    [root@docker1 centos7]# cat Centos-7.repo 
    # CentOS-Base.repo
    #
    # The mirror system uses the connecting IP address of the client and the
    # update status of each mirror to pick mirrors that are updated to and
    # geographically close to the client.  You should use this for CentOS updates
    # unless you are manually picking other mirrors.
    #
    # If the mirrorlist= does not work for you, as a fall back you can try the 
    # remarked out baseurl= line instead.
    #
    #
     
    [base]
    name=CentOS-$releasever - Base - mirrors.aliyun.com
    failovermethod=priority
    baseurl=http://mirrors.aliyun.com/centos/$releasever/os/$basearch/
            http://mirrors.aliyuncs.com/centos/$releasever/os/$basearch/
    #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
    gpgcheck=1
    gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
     
    #released updates 
    [updates]
    name=CentOS-$releasever - Updates - mirrors.aliyun.com
    failovermethod=priority
    baseurl=http://mirrors.aliyun.com/centos/$releasever/updates/$basearch/
            http://mirrors.aliyuncs.com/centos/$releasever/updates/$basearch/
    #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates
    gpgcheck=1
    gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
     
    #additional packages that may be useful
    [extras]
    name=CentOS-$releasever - Extras - mirrors.aliyun.com
    failovermethod=priority
    baseurl=http://mirrors.aliyun.com/centos/$releasever/extras/$basearch/
            http://mirrors.aliyuncs.com/centos/$releasever/extras/$basearch/
    #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras
    gpgcheck=1
    gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
     
    #additional packages that extend functionality of existing packages
    [centosplus]
    name=CentOS-$releasever - Plus - mirrors.aliyun.com
    failovermethod=priority
    baseurl=http://mirrors.aliyun.com/centos/$releasever/centosplus/$basearch/
            http://mirrors.aliyuncs.com/centos/$releasever/centosplus/$basearch/
    #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus
    gpgcheck=1
    enabled=0
    gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
     
    #contrib - packages by Centos Users
    [contrib]
    name=CentOS-$releasever - Contrib - mirrors.aliyun.com
    failovermethod=priority
    baseurl=http://mirrors.aliyun.com/centos/$releasever/contrib/$basearch/
            http://mirrors.aliyuncs.com/centos/$releasever/contrib/$basearch/
    #mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=contrib
    gpgcheck=1
    enabled=0
    gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
    [root@docker1 centos7]# cat supervisord.conf 
    [unix_http_server]
    file=/var/run/supervisor.sock ; (the path to the socket file)
    chmod=0700              ; socket file mode (default 0700)
    
    [supervisord]
    logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
    logfile_maxbytes=50MB
    logfile_backups=10
    loglevel=info
    pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
    nodaemon=true           ; (Start in foreground if true; default false)
    minfds=1024                 ; (min. avail startup file descriptors;default 1024)
    minprocs=200                ; (min. avail process descriptors;default 200)
    
    [rpcinterface:supervisor]
    supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
    
    [supervisorctl]
    serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL  for a unix socket
    
    [include]
    files = /etc/supervisor.conf.d/*.conf

    Download centos from docker.io and build the Docker image from the Dockerfile:
    [root@docker1 centos7]# docker build -t fengjian/centos:7.3  .

    List the newly built image

    [root@docker1 centos7]#  docker images

    REPOSITORY             TAG                 IMAGE ID            CREATED             SIZE
    fengjian/centos         7.1                 03a49ca4b7b9        13 days ago         667.8 MB

    Create a Docker container from the image with the docker run command.

    -p (lowercase): map a container port to a specific host port

    -P (uppercase): map the container's exposed ports to random host ports
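
    As a quick illustration of -P, a container (named base-random here purely for illustration) built from the image above can be inspected with docker port; the host port that -P picks is random and will differ on each run:

    [root@docker1 ~]# docker run -d -P --name base-random fengjian/centos:7.3   # EXPOSEd port 22 -> random host port
    [root@docker1 ~]# docker port base-random 22                                # prints the mapping, e.g. 0.0.0.0:32768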

    Create the container

    [root@docker1 ~]#docker run -d -p 2222:22 --name base fengjian/centos:7.3

    Check the container

    [root@docker1 ~]# docker ps -a
    CONTAINER ID        IMAGE                COMMAND                  CREATED              STATUS              PORTS                            NAMES
    ebce60d09d31        fengjian/centos:7.1   "/usr/bin/supervisord"   About a minute ago   Up About a minute   2222/tcp, 0.0.0.0:2222->22/tcp   base

    Enter the container
    [root@docker1 ~]# docker exec -it ebce60d09d31 /bin/bash

    Build the php-fpm image

    [root@docker1 php-fpm]# ls
    Dockerfile  nginx_default.conf  nginx_nginx.conf  php_www.conf  supervisor_nginx.conf  supervisor_php-fpm.conf
    [root@docker1 php-fpm]# vim Dockerfile 
    
    
    FROM       fengjian/centos:7.3
    MAINTAINER fengjian <fengjian@senyint.com>
    
    # Set environment variable
    ENV     APP_DIR /app
    
    RUN     yum -y swap -- remove fakesystemd -- install systemd systemd-libs && \
            yum -y install nginx php-cli php-mysql php-pear php-ldap php-mbstring php-soap php-dom php-gd php-xmlrpc php-fpm php-mcrypt && \
            yum clean all
    
    ADD     nginx_nginx.conf /etc/nginx/nginx.conf
    ADD     nginx_default.conf /etc/nginx/conf.d/default.conf
    
    ADD     php_www.conf /etc/php-fpm.d/www.conf
    RUN     sed -i 's/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/' /etc/php.ini
    
    RUN     mkdir -p /app && echo "<?php phpinfo(); ?>" > ${APP_DIR}/info.php
    
    EXPOSE  80 443
    
    ADD     supervisor_nginx.conf /etc/supervisor.conf.d/nginx.conf
    ADD     supervisor_php-fpm.conf /etc/supervisor.conf.d/php-fpm.conf
    
    ONBUILD ADD . /app
    ONBUILD RUN chown -R nginx:nginx /app
    [root@docker1 php-fpm]# vim supervisor_nginx.conf 
    
    [program:nginx]
    directory=/
    command=/usr/sbin/nginx -c /etc/nginx/nginx.conf
    user=root
    autostart=true
    autorestart=true
    stdout_logfile=/var/log/supervisor/%(program_name)s.log
    stderr_logfile=/var/log/supervisor/%(program_name)s.log
    
    
    [program:php-fpm]
    directory=/
    command=/usr/sbin/php-fpm
    user=root
    autostart=true
    autorestart=true
    stdout_logfile=/var/log/supervisor/%(program_name)s.log
    stderr_logfile=/var/log/supervisor/%(program_name)s.log

    Build the php-fpm image

    [root@docker1 php-fpm]# docker build -t fengjian/php-fpm:5.4  .

    Start a container

    [root@docker1 php-fpm]# docker run -d -p 8080:80 --name wesite fengjian/php-fpm:5.4

    Access port 8080 on the host

    http://192.168.20.209:8080/info.php

    Build the mysql image

    [root@docker1 mysql]# docker build -t fengjian/mysql:5.5 .

    ### docker run -d -p 3306:3306 -v host_dir:container_dir  (the -v flag maps a host directory into the container) ###

    [root@docker1 mysql]# docker run -d -p 3306:3306 -v  /data/mysql/data:/var/lib/mysql  --name dbserver fengjian/mysql:5.5

    Files in /data/mysql/data on the host

    [root@docker1 data]# ll /data/mysql/data/
    total 28700
    -rw-rw---- 1 27   27    16384 Jan 19 15:25 aria_log.00000001
    -rw-rw---- 1 27   27       52 Jan 19 15:25 aria_log_control
    drwx------ 2 27   27       19 Jan 19 15:40 fengjian
    -rw-rw---- 1 27   27 18874368 Jan 19 15:25 ibdata1
    -rw-rw---- 1 27   27  5242880 Jan 19 15:26 ib_logfile0
    -rw-rw---- 1 27   27  5242880 Jan 19 15:25 ib_logfile1
    drwx------ 2 27 root     4096 Jan 19 15:25 mysql
    srwxrwxrwx 1 27   27        0 Jan 19 15:26 mysql.sock
    drwx------ 2 27   27     4096 Jan 19 15:25 performance_schema
    drwx------ 2 27 root        6 Jan 19 15:25 test
    [root@docker1 data]# 

    After deleting the container, a new container can keep using the same data.
    [root@docker1 data]#  docker rm -f dbserver    (delete the container)
    [root@docker1 data]#  docker run -d -p 3306:3306 -v /data/mysql/data:/var/lib/mysql --name newmysqldb  fengjian/mysql:5.5

    Build the dynamic wordpress site on top of the php-fpm image

    [root@docker1 wordpress]# ls
    Dockerfile  init.sh      readme.html      wp-admin            wp-comments-post.php  wp-content   wp-includes        wp-load.php   wp-mail.php      wp-signup.php     xmlrpc.php
    index.php   license.txt  wp-activate.php  wp-blog-header.php  wp-config-sample.php  wp-cron.php  wp-links-opml.php  wp-login.php  wp-settings.php  wp-trackback.php
    [root@docker1 wordpress]# vim Dockerfile

    FROM fengjian/php-fpm:5.4
    ADD init.sh /init.sh
    ENTRYPOINT ["/init.sh", "/usr/bin/supervisord", "-n", "-c", "/etc/supervisord.conf"]
    # /init.sh runs first, then the command that follows it, which in effect starts nginx and php-fpm
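
    init.sh itself is not shown in this listing. For an ENTRYPOINT wrapper of this kind it is typically a small script that performs one-time setup (here, generating wp-config.php from the WORDPRESS_DB_* variables used further below) and then execs the remaining arguments. A minimal sketch, not the original init.sh; WORDPRESS_DB_NAME and the sed patterns are assumptions:

    #!/bin/bash
    set -e
    # one-time setup: build wp-config.php from environment variables
    if [ ! -f /app/wp-config.php ]; then
        sed -e "s/database_name_here/${WORDPRESS_DB_NAME:-wordpress}/" \
            -e "s/username_here/${WORDPRESS_DB_USER}/" \
            -e "s/password_here/${WORDPRESS_DB_PASSWORD}/" \
            -e "s/localhost/${WORDPRESS_DB_HOST}/" \
            /app/wp-config-sample.php > /app/wp-config.php
    fi
    # hand control to the remaining ENTRYPOINT arguments (supervisord here)
    exec "$@"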

    The last two lines of the parent php-fpm image's Dockerfile:

    ONBUILD ADD . /app
    ONBUILD RUN chown -R nginx:nginx /app

    The ONBUILD instructions take effect when the wordpress image is built.

    All the code files are copied into /app, but the Dockerfile itself is not needed there, so create a .dockerignore in the directory to exclude it:

    [root@docker1 wordpress]# vim .dockerignore

    Dockerfile

    Build the wordpress image, version 4.2

    [root@docker1 wordpress]# docker build -t fengjian/wordpress:4.2  .

    List the images

    [root@docker1 wordpress]# docker images
    REPOSITORY             TAG                 IMAGE ID            CREATED             SIZE
    fengjian/wordpress      4.2                 8591f07cc2e2        15 seconds ago      848.4 MB
    fengjian/mysql          5.5                 b54f78aeefb8        21 hours ago        848.2 MB
    fengjian/php-fpm        5.4                 fc1856e25486        21 hours ago        810.8 MB
    fengjian/centos         7.1                 fbafb1b36c30        21 hours ago        712.8 MB
    tomcat                 latest              47bd812c12f6        5 weeks ago         355.2 MB
    mysql                  latest              594dc21de8de        5 weeks ago         400.1 MB
    centos                 centos7.1.1503      285396d0a019        4 months ago        212.1 MB
    kubeguide/tomcat-app   v1                  a29e200a18e9        6 months ago        358.2 MB

    Start a container, passing environment variables with -e; WORDPRESS_DB_HOST is defined and used in the init.sh script.

    [root@docker1 wordpress]# docker run -d -p 80:80 --name wordpress  -e WORDPRESS_DB_HOST=192.168.20.209 -e WORDPRESS_DB_USER=fengjian -e WORDPRESS_DB_PASSWORD=123456 fengjian/wordpress:4.2



    Access port 80 on the host IP.

     ENTRYPOINT and CMD: the difference
    
    Running a Docker container is like running a single program: if you write ten such instructions, only the last one takes effect.
    
    1. ENTRYPOINT ["executable","param1","param2"]
    2. ENTRYPOINT command param1 param2 (shell form)
    
    docker run -it --entrypoint=... overrides the ENTRYPOINT defined in the Dockerfile


    CMD usage
    1. CMD ["executable","param1","param2"] (exec form, this is the preferred form)
    First form: run an executable with its arguments

    2. CMD ["param1","param2"] (as default parameters to ENTRYPOINT)
    Second form: supply default parameters to ENTRYPOINT

    3. CMD command param1 param2 (shell form)
    Third form (shell form): the command is executed via "/bin/sh -c"

    Example:
    CMD ["/bin/echo","This is test CMD"]
    docker run -it --rm fengjian/cmd:0.1 /bin/bash
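
    The two instructions are most useful together: ENTRYPOINT fixes the executable and CMD supplies overridable default arguments. A small sketch (the fengjian/cmd:0.2 tag is only for illustration):

    FROM centos:latest
    ENTRYPOINT ["/bin/echo"]            # the fixed executable
    CMD ["This is the default text"]    # default argument, replaced by anything given after the image name

    # docker run --rm fengjian/cmd:0.2          -> prints "This is the default text"
    # docker run --rm fengjian/cmd:0.2 hello    -> prints "hello"
    # docker run --rm --entrypoint=/bin/cat fengjian/cmd:0.2 /etc/hostname    -> replaces the ENTRYPOINT itself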

    ################################################################################################# 

    Part 2: Docker in practice: the Registry and continuous integration

    Build an internal enterprise registry

    docker1: 192.168.20.209

    registry: 192.168.20.135, port 5000

    [root@registry ~]# docker search registry

    Pull the registry image to the local host

    [root@registry ~]# docker pull registry
    Using default tag: latest
    latest: Pulling from library/registry
    b7f33cc0b48e: Pull complete 
    46730e1e05c9: Pull complete 
    458210699647: Pull complete 
    0cf045fea0fd: Pull complete 
    b78a03aa98b7: Pull complete 
    Digest: sha256:0e40793ad06ac099ba63b5a8fae7a83288e64b50fe2eafa2b59741de85fd3b97
    Status: Downloaded newer image for registry:latest

    List the images on docker1 (192.168.20.209)

    [root@docker1 ~]# docker images
    REPOSITORY             TAG                 IMAGE ID            CREATED             SIZE
    fengjian/wordpress      4.2                 8591f07cc2e2        2 days ago          848.4 MB
    fengjian/mysql          5.5                 b54f78aeefb8        3 days ago          848.2 MB
    fengjian/php-fpm        5.4                 fc1856e25486        3 days ago          810.8 MB
    fengjian/centos         7.1                 fbafb1b36c30        3 days ago          712.8 MB
    registry               latest              d1e32b95d8e8        4 days ago          33.17 MB
    tomcat                 latest              47bd812c12f6        5 weeks ago         355.2 MB
    mysql                  latest              594dc21de8de        5 weeks ago         400.1 MB
    centos                 centos7.1.1503      285396d0a019        4 months ago        212.1 MB

    List the images on registry (192.168.20.135)

    [root@registry ~]# docker images
    REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
    registry            latest              d1e32b95d8e8        2 weeks ago         33.2 MB

    Start a registry container on the registry host

    [root@registry ~]# docker run -d -p 5000:5000 -v /opt/registry:/var/lib/registry --restart=always --name registry registry:latest

    By default the registry service stores uploaded images under /var/lib/registry inside the container; mounting the host's /opt/registry directory onto it stores the images under /opt/registry on the host.

    registry_url: address of the running registry, here the local registry:5000

    namespace: the directory (namespace) under the registry

    name: the image name

    registry_url/namespace/tomcat:v1.0

    docker tag adds another tag to an image, which works much like an alias.

    [root@docker1 ~] docker pull fengjian/fengjian
    Tag fengjian/fengjian as 192.168.20.135:5000/fengjian/fengjian/fengjian:20170122v1.0
    [root@docker1 ~]# docker tag fengjian/fengjian:latest  192.168.20.135:5000/fengjian/fengjian/fengjian:20170122v1.0

    List the images

    [root@docker1 ~]# docker images
    REPOSITORY                                    TAG                 IMAGE ID            CREATED             SIZE
    fengjian/wordpress                             4.2                 8591f07cc2e2        2 days ago          848.4 MB
    fengjian/mysql                                 5.5                 b54f78aeefb8        3 days ago          848.2 MB
    fengjian/php-fpm                               5.4                 fc1856e25486        3 days ago          810.8 MB
    fengjian/centos                                7.1                 fbafb1b36c30        3 days ago          712.8 MB
    registry                                      latest              d1e32b95d8e8        4 days ago          33.17 MB
    tomcat                                        latest              47bd812c12f6        5 weeks ago         355.2 MB
    mysql                                         latest              594dc21de8de        5 weeks ago         400.1 MB
    centos                                        centos7.1.1503      285396d0a019        4 months ago        212.1 MB
    kubeguide/tomcat-app                          v1                  a29e200a18e9        6 months ago        358.2 MB
    192.168.20.135:5000/fengjian/fengjian/fengjian   20170122v1.0        3468c34fa83b        13 months ago       97.95 MB
    fengjian/fengjian                               latest              3468c34fa83b        13 months ago       97.95 MB


    Run docker push to push the tagged image to our private registry

      [root@docker ~]# docker push  192.168.20.135:5000/fengjian/fengjian/fengjian:20170122v1.0



      The push refers to a repository [192.168.20.135:5000/fengjian/nginx20170203]
      Get https://192.168.20.135:5000/v1/_ping: http: server gave HTTP response to HTTPS client

    The push to the private registry fails because the registry we started is not a secure, trusted (HTTPS) registry. We therefore need to modify the Docker configuration file /usr/lib/systemd/system/docker.service and add the option shown below. Note: both the registry and docker1 servers need this change.
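
    The option in question is the daemon's --insecure-registry flag; a sketch of the change (the exact ExecStart line can differ between docker-engine versions):

    [root@docker1 ~]# vim /usr/lib/systemd/system/docker.service
    ExecStart=/usr/bin/dockerd --insecure-registry=192.168.20.135:5000

    [root@docker1 ~]# systemctl daemon-reload
    [root@docker1 ~]# systemctl restart docker.service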

    Push the image to 192.168.20.135

    [root@docker overlay]# docker push  192.168.20.135:5000/fengjian/fengjian/fengjian:20170122v1.0


    The push refers to a repository [192.168.20.135:5000/fengjian/fengjian/fengjian:20170122v1.0]

    23c8d40ebb9e: Pushed
    5526182de2ab: Pushed
    652f3c2c3f57: Pushed
    bf76891beffc: Pushed
    f696adb3bd45: Pushed
    46db44806cd4: Pushed
    2dd577fe2559: Pushed
    bbc4847eb1d2: Pushed
    747f5baee8ac: Pushed
    29003cbb49e1: Pushed
    f5d4b5d6f2ff: Pushed
    ee745a500b91: Pushed
    3383431a5cc0: Pushed
    8aabcc6c5e8d: Pushed
    967105df7f61: Pushed
    0c051da11cb4: Pushed
    34e7b85d83e4: Pushed
    v1: digest: sha256:4e5d763dfb99ecd95128d1033e14bb4740613045c89bb2646006ac7db08f5a6f size: 3871

    Check the upload result in a browser.

    Use docker pull to fetch an image from our private registry:

    [root@docker ~ ]# docker pull 192.168.20.135:5000/fengjian/nginx20170203:v1

    v1: Pulling from fengjian/nginx20170203
    17385548ba54: Already exists
    59da822a5404: Already exists
    ec5de50f3658: Already exists
    751fb563feef: Already exists
    8145f1a2090b: Already exists
    575600a5843d: Already exists
    035deb98f67f: Already exists
    2e1f8c7e36ce: Already exists
    3cf27705cd77: Pull complete
    d4e37a9633b1: Pull complete
    1aab1e953ef2: Pull complete
    31afde0ced92: Pull complete
    253eadce8153: Pull complete
    750606d876c8: Pull complete
    f96cc19c204f: Pull complete
    eea9946ffb66: Pull complete
    da59d6a4a8bd: Pull complete
    Digest: sha256:4e5d763dfb99ecd95128d1033e14bb4740613045c89bb2646006ac7db08f5a6f
    Status: Downloaded newer image for 192.168.20.135:5000/fengjian/nginx20170203:v1

    Start a container on 192.168.20.209

    [root@docker ~ ]# docker run -d -p 8081:80 --name nginx 192.168.20.135:5000/fengjian/nginx20170203:v1

    [root@docker ~ ]# docker ps -a
    CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
    7539cd87c9bd 192.168.20.135:5000/fengjian/nginx20170203:v1 "/usr/bin/supervis..." 2 minutes ago Up 2 minutes 22/tcp, 0.0.0.0:8081->80/tcp nginx

    Log in to the container

    [root@docker overlay]# docker exec -it nginx /bin/bash

    nginx is running normally inside the container

    [root@7539cd87c9bd nginx-1.11.2]# ps -ef
    UID PID PPID C STIME TTY TIME CMD
    root 1 0 0 17:21 ? 00:00:00 /usr/bin/python2 /usr/bin/supervisord -n -c /etc/supervisord.conf
    root 9 1 0 17:21 ? 00:00:00 nginx: master process /data/nginx/sbin/nginx
    nobody 10 9 0 17:21 ? 00:00:00 nginx: worker process
    root 88 0 1 17:25 ? 00:00:00 /bin/bash
    root 104 1 0 17:25 ? 00:00:00 /data/nginx/sbin/nginx
    root 105 88 0 17:25 ? 00:00:00 ps -ef

    Note: on the registry server the uploaded images are not visible as docker images; the stored image data lives under the following path:

    [root@registry repositories]# pwd
    /data/registry/docker/registry/v2/repositorie

    Serving the Docker registry over HTTPS
    
    1. Start the registry container, same as above
    [root@registry ~]#
    docker run -d -p 5000:5000 -v /opt/registry:/var/lib/registry --restart=always --name registry registry:latest
    2. Remove the "--insecure-registry=192.168.20.135:5000" option from /usr/lib/systemd/system/docker.service
    [root@registry ~]# systemctl daemon-reload
    [root@registry ~]# systemctl restart docker.service
    
    3. Start an nginx container mapping port 443
    [root@registry ~]#  docker pull nginx    (you can also build your own nginx image)
    Run the nginx container
    [root@registry ~]#  docker run -d -p 443:443 --name nginx  nginx:latest
    
    4. Modify the nginx config file and add the domain certificate (copied into the running container)
    [root@registry ~]#  docker cp  nginx.conf  nginx:/etc/nginx/nginx.conf 
    [root@registry ~]#  docker cp  sslkey  nginx:/etc/nginx/
    5. Log in to the container and restart nginx
    [root@123131nginx ~]#   /etc/init.d/nginx restart
    
    ########################
    nginx.conf configuration file
    
    events {
        worker_connections  1024;
    }
    
    http {
    
      upstream docker-registry {
        server 192.168.20.135:5000;
      }
    
      server {
        listen 443 ssl;
        server_name docker.cinyi.com;
    
        ssl_certificate /data/nginx/sslkey/cinyi.crt;
        ssl_certificate_key /data/nginx/sslkey/cinyi.key;
    
        client_max_body_size 0;
    
        chunked_transfer_encoding on;
    
        location / {
          proxy_pass                          http://docker-registry;
          proxy_set_header  Host              $http_host;
          proxy_set_header  X-Real-IP         $remote_addr;
          proxy_set_header  X-Forwarded-For   $proxy_add_x_forwarded_for;
          proxy_set_header  X-Forwarded-Proto $scheme;
          proxy_read_timeout                  900;
        }
      }
    }


    6. On 192.168.20.209, list the images, tag an image, and push it to the registry

    [root@docker1 ~]# docker images
    [root@docker1 ~]# docker tag senyint/im-web docker.cinyi.com:443/senyint/im-web:443
    [root@docker1 ~]# docker push  docker.cinyi.com:443/senyint/im-web:443
     
    List the repositories stored in the registry
    
    Using the Registry V2 API, all repositories can be listed:
    
    curl http://<私有registry地址>/v2/_catalog
    
    For example
    [root@docker225 ~]# curl https://docker.cinyi.com/v2/_catalog
    {"repositories":["fengjian/nginx20170203","mysql20170203","senyint/centos7.3","senyint/im-web","senyint/nginx"]}



    List the tags of an image stored in the registry
    
    Using the Registry V2 API, all tags of an image can be listed:
    
    curl -X GET <protocol>://<registry_host>/v2/<image name>/tags/list
    
    For example 
    [root@docker225 ~]# curl -X GET https://docker.cinyi.com/v2/senyint/im-web/tags/list
    {"name":"senyint/im-web","tags":["latest","443"]}
    Deleting an image from the docker.cinyi.com registry
    
    1. When starting the registry, add delete: enabled: true under the storage section of the config file to allow image deletion
    [root@registry ~]# tail -f /etc/docker/registry/config.yml    (after editing the yml file inside the container it kept reporting errors; to be dealt with later)
    storage:
      delete:
        enabled: true

    2. Get the digest_hash
    curl --header "Accept: application/vnd.docker.distribution.manifest.v2+json" -I -X GET https://docker.cinyi.com/v2/senyint/nginx/manifests/latest
    HTTP/1.1 200 OK
    Server: nginx/1.11.2
    Date: Wed, 15 Feb 2017 01:24:56 GMT
    Content-Type: application/vnd.docker.distribution.manifest.v2+json
    Content-Length: 3669
    Connection: keep-alive
    Docker-Content-Digest: sha256:609a595020f0827301064ebc07b3ec3a5751641ef975a7a186518cf6b0d70f63
    Docker-Distribution-Api-Version: registry/2.0
    Etag: "sha256:609a595020f0827301064ebc07b3ec3a5751641ef975a7a186518cf6b0d70f63"
    X-Content-Type-Options: nosniff
    
    3. Copy the digest_hash from the Docker-Content-Digest: <digest_hash> header
    Docker-Content-Digest: sha256:609a595020f0827301064ebc07b3ec3a5751641ef975a7a186518cf6b0d70f63
    4. Delete the image from the registry
      curl -I -X DELETE <protocol>://<registry_host>/v2/<repo_name>/manifests/<digest_hash>
    [root@docker225 ~]# curl -I  -X DELETE https://docker.cinyi.com/v2/senyint/im-web/manifests/sha256:609a595020f0827301064ebc07b3ec3a5751641ef975a7a186518cf6b0d70f63


    Second method

    1. Go to the image storage directory and delete the image's folder

      [root@registry repositories]# docker exec registry rm -rf senyint

     2. Run garbage collection
      [root@registry repositories]# docker exec registry /bin/registry garbage-collect /etc/docker/registry/config.yml

      3. Restart the container

      [root@registry repositories]#  docker restart registry

    Install docker-compose, an orchestration tool that starts multiple containers at once.

    # upgrade the installed pip first
    pip install --upgrade pip
    
    Install docker-compose
    pip install docker-compose
    
    Running docker-compose raises an error:
    pkg_resources.DistributionNotFound: backports.ssl-match-hostname>=3.5
    
    Use pip to upgrade backports.ssl_match_hostname
    pip install --upgrade backports.ssl_match_hostname
    After upgrading backports.ssl_match_hostname to version 3.5 the problem is solved.
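
    A quick check that the installation works before running a project:

    [root@docker1 ~]# docker-compose --version    # prints the installed docker-compose version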

    [root@docker1 certs]# docker-compose up
    ERROR:
    Can't find a suitable configuration file in this directory or any
    parent. Are you in the right directory?

    Supported filenames: docker-compose.yml, docker-compose.yaml

    Write the docker-compose.yml file

    [root@docker1 second]# vim docker-compose.yml

    mysql:
      image: fengjian/mysql:5.5
      ports:
        - "3306:3306"
      volumes:
        - /var/lib/docker/vfs/dir/dataxc:/var/lib/mysql
      hostname: mydb.server.com

    tomcat:
      image: tomcat
      ports:
        - "8080:8080"
      links:
        - mysql:db
      environment:
        - TOMCAT_USER=admin
        - TOMCAT_PASS=admin
      hostname: tomcat.server.com

    Start in the background, from the directory containing docker-compose.yml

    [root@docker1 second]# docker-compose up -d

    Check the started containers

    [root@docker1 second]# docker ps -a
    CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
    5b844baf351e tomcat "catalina.sh run" 6 minutes ago Up 6 minutes 0.0.0.0:8080->8080/tcp second_tomcat_1
    f88ccf720119 fengjian/mysql:5.5 "/scripts/start" 6 minutes ago Up 6 minutes 22/tcp, 0.0.0.0:3306->3306/tcp second_mysql_1

    Stop the two containers

    [root@docker1 second]# docker-compose stop

    Use docker-compose ps to see which containers were started by docker-compose.

    [root@docker1 second]# docker-compose ps
    Name Command State Ports
    ----------------------------------------------------
    second_mysql_1 /scripts/start Exit 137
    second_tomcat_1 catalina.sh run Exit 143

    Remove the two containers created by docker-compose.

    [root@docker1 second]# docker-compose rm
    Name Command State Ports
    ----------------------------------------------------
    second_mysql_1 /scripts/start Exit 137
    second_tomcat_1 catalina.sh run Exit 143

     

    Automated builds with the Docker registry (Jenkins)

    Build the jenkins image
    Build the maven image:

    [root@docker /]# mkdir maven-tar

    [root@docker /]# cd maven-tar/

    [root@docker maven-tar]# wget http://mirror.bit.edu.cn/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz

    Docker's biggest strength is deployment; Jenkins's strengths are job scheduling and its plugin system. How do we combine the two?

    Create a jenkins image

    apache-maven-3.3.9-bin.tar.gz  Dockerfile  jdk.tar.gz  jenkins.war  rc.local  settings.xml  supervisor_tomcat.conf  tomcat

    Dockerfile
    FROM       centos7.3:20170204
    MAINTAINER fengjian <fengjian@senyint.com>
    
    
    # Install maven
    ADD apache-maven-3.3.9-bin.tar.gz /data/
    ADD jdk.tar.gz  /data/
    COPY tomcat /data/tomcat
    COPY jenkins.war /data/tomcat/webapps/
    
    COPY settings.xml /data/maven/conf/settings.xml
    ADD  supervisor_tomcat.conf /etc/supervisor.conf.d/tomcat.conf
    supervisor config file that starts tomcat
    [root@docker maven-tar]# vim supervisor_tomcat.conf 
    
    [program:tomcat]
    directory=/
    command=/data/tomcat/bin/catalina.sh start
    user=root
    autostart=true
    autorestart=true
    stdout_logfile=/var/log/supervisor/%(program_name)s.log
    stderr_logfile=/var/log/supervisor/%(program_name)s.log

    Additions to the tomcat start script: vim /data/tomcat/bin/catalina.sh 

    export JENKINS_HOME="/data/jenkins_home"
    export JAVA_HOME=/data/jdk
    export JRE_HOME=${JAVA_HOME}/jre
    export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
    
    # OS specific support.  $var _must_ be set to either true or false.
    Build the jenkins image and start it

    [root@docker ~]# docker build -t jenkins .
    [root@docker ~]# docker run -d -p 8080:80 --name jenkins jenkins

    Commit the container as an image

    1. Stop jenkins
    [root@docker ~ ]# docker stop jenkins
    
    root@docker maven-tar]# docker ps -a
    CONTAINER ID        IMAGE      COMMAND                   CREATED             STATUS                   PORTS                          NAMES
    9174cf36cdfc        jenkins     "/usr/bin/supervis..."   2 hours ago         Up About an hour         22/tcp, 0.0.0.0:8080->80/tcp   jenkins
    
    
    2. Commit the container as an image
    [root@docker ~ ]# docker  commit   9174cf36cdfc   jenkins20170204

    3. View the new jenkins image
    [root@docker ~] docker images

      REPOSITORY       TAG     IMAGE ID     CREATED       SIZE
      jenkins20170204    latest    5254a69cb614    41 seconds ago    1.62 GB



    The jenkins image carries the docker client command-line tool /usr/bin/docker, so we only need to pass the DOCKER_HOST environment variable, or map the docker.sock file into the jenkins container, to give the container the ability to drive Docker and thereby combine the two.

    Run the new jenkins image, mapping /usr/bin/docker and /var/run/docker.sock into the jenkins container
    docker run -d -p 8080:80 -v /usr/bin/docker:/usr/bin/docker -v /var/run/docker.sock:/var/run/docker.sock --name jenkins20170204 jenkins20170204:latest

    Log in to the jenkins container and check whether the jenkins process is running

    [root@docker ~]# docker exec -it jenkins20170204 /bin/bash

      

    Test whether docker is usable inside the container


    Fix:

    yum install libtool-ltdl-devel

    Run it again and check the running containers

    List the images


    In the browser, configure Jenkins to clone build-nginx locally and then build it into an image.

    Note: $WORKSPACE is the path the project is cloned into by git, e.g. build-nginx


     Start the build

     The image has been generated.

    Workflow for a Java project

            1. Create a new project in Jenkins

            2. Clone the project from the git repository to the local workspace

            3. Build it into an image with docker

            4. Push the image to the registry server

            5. The client pulls the image from the registry and starts a container.

            6. Test and development environments are delivered through images.

     Three layers: 1. base image

               2. middleware image

               3. application image

    To build the Java project's Docker image, first compile the Java code with maven and build it into an image, then copy the war package out with docker cp and into the middleware image.

    FROM       centos7.3:20170204
    MAINTAINER fengjian <fengjian@senyint.com>
    
    
    # Install maven
    ADD apache-maven-3.3.9-bin.tar.gz /data/
    ADD jdk.tar.gz  /data
    #COPY tomcat /data/tomcat
    #COPY jenkins.war /data/tomcat/webapps/
    
    COPY  apache-maven-3.3.9   /data/maven
    COPY settings.xml /data/maven/conf/settings.xml
    CMD  ["source /etc/profile"]
    #ADD  supervisor_tomcat.conf /etc/supervisor.conf.d/tomcat.conf
    
    ADD hello /hello
    RUN cd /hello && \
        /data/maven/bin/mvn install package
    1. Build the maven image; the Java code is compiled by mvn install package during the build
    [root@docker ~]# docker build -t senyint/maven:v1 .
    
    2. Create the maven container without starting it
    [root@docker ~]# docker create --name maven senyint/maven:v1
    
    3. Copy the hello.war package out of the maven container
    [root@docker ~]# docker cp  maven:/hello/target/hello.war  .
    
    
    #########################################################################################################################################################################################################
    My own summary

     Three layers: 1. base image          centos:7.3      supervisor

                   2. middleware image    java  maven    tomcat

                   3. application image   Java project war package (jenkins   ........)



    1. centos7.3 base image

    Dockerfile for the centos7.3 base image

    FROM centos:latest
    MAINTAINER fengjian <fengjian@senyint.com>
    ENV TZ "Asia/Shanghai"
    ENV TERM xterm
    ADD 1.repo /etc/yum.repos.d/1.repo
    ADD aliyun-mirror.repo /etc/yum.repos.d/CentOS-Base.repo
    ADD aliyun-epel.repo /etc/yum.repos.d/epel.repo
    RUN yum install -y curl openssl* wget libtool-ltdl-devel tar bzip2 unzip passwd sudo yum-utils hostname net-tools && \
        yum install -y gcc gcc-c++ git make automake cmake patch logrotate python-devel libpng-devel libjpeg-devel && \
        yum install -y --enablerepo=epel pwgen python-pip && \
        yum clean all
    RUN pip install -i https://pypi.tuna.tsinghua.edu.cn/simple supervisor
    ADD supervisord.conf /etc/supervisord.conf
    RUN mkdir -p /etc/supervisor.conf.d && mkdir -p /var/log/supervisor
    ENTRYPOINT ["/usr/bin/supervisord", "-n", "-c", "/etc/supervisord.conf"]

    supervisord.conf configuration file
    
    [root@docker centos7]# cat supervisord.conf 
    [unix_http_server]
    file=/var/run/supervisor.sock ; (the path to the socket file)
    chmod=0700              ; socket file mode (default 0700)
    
    [supervisord]
    logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
    logfile_maxbytes=50MB
    logfile_backups=10
    loglevel=info
    pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
    nodaemon=true           ; (Start in foreground if true; default false)
    minfds=1024                 ; (min. avail startup file descriptors;default 1024)
    minprocs=200                ; (min. avail process descriptors;default 200)
    
    [rpcinterface:supervisor]
    supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
    
    [supervisorctl]
    serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL  for a unix socket
    
    [include]
    files = /etc/supervisor.conf.d/*.conf
    2. Middleware image

    [root@docker jdk]# ls
    Dockerfile jdk.tar.gz  maven.tar.gz  profile  supervisor_tomcat.conf  tomcat.tar.gz

    [root@docker jdk]# vim profile    (add these environment variables at the bottom)
    
    export JRE_HOME=${JAVA_HOME}/jre
    export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
    export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$JAVA_HOME:$PATH
    
    MAVEN_HOME=/data/maven
    export MAVEN_HOME
    export PATH=${PATH}:${MAVEN_HOME}/bin
    [root@docker jdk]# vim supervisor_tomcat.conf    (used to start tomcat)
    
    [program:tomcat]
    directory=/
    command=/data/tomcat/bin/catalina.sh start
    user=root
    autostart=true
    autorestart=true
    stdout_logfile=/var/log/supervisor/%(program_name)s.log
    stderr_logfile=/var/log/supervisor/%(program_name)s.log
    [root@docker jdk]# vim Dockerfile

    FROM senyint/centos7.3
    MAINTAINER fengjian <fengjian@senyint.com.com>

    ENV JAVA_HOME /data/jdk
    ENV JRE_HOME ${JAVA_HOME}/jre
    ENV CLASSPATH .:${JAVA_HOME}/lib:${JRE_HOME}/lib

    ENV MAVEN_HOME /data/maven
    ENV PATH ${PATH}:${MAVEN_HOME}/bin:$JAVA_HOME/bin:$JRE_HOME/bin:$JAVA_HOME:$PATH

    RUN mkdir -p /data/webserver
    ADD maven.tar.gz /data
    ADD jdk.tar.gz /data
    ADD tomcat.tar.gz /data
    ADD profile /etc
    #ADD env.sh /etc/profile.d/

    ADD supervisord.conf /etc/supervisord.conf
    ADD supervisor_tomcat.conf /etc/supervisor.conf.d/tomcat.conf

    RUN mkdir -p /etc/supervisor.conf.d && \
        mkdir -p /var/log/supervisor

    ENTRYPOINT ["/usr/bin/supervisord", "-n", "-c", "/etc/supervisord.conf"]

     
     

    FROM docker.cinyi.com:443/centos7.3
    
    # Maintainer
    MAINTAINER fengjian <fengjian@senyint.com>
    # Set a timezone environment variable
    ENV TZ "Asia/Shanghai"
    # Virtual terminal
    ENV TERM xterm
    
    ENV JAVA_HOME /data/jdk
    ENV JRE_HOME ${JAVA_HOME}/jre
    ENV CLASSPATH .:${JAVA_HOME}/lib:${JRE_HOME}/lib
    
    ENV MAVEN_HOME /data/maven
    ENV PATH ${PATH}:${MAVEN_HOME}/bin:$JAVA_HOME/bin:$JRE_HOME/bin:$JAVA_HOME:$PATH
    
    RUN mkdir -p /data/webserver
    ADD jdk.tar.gz /data
    ADD tomcat.tar.gz /data
    ADD host.sh /data
    
    ADD profile /etc
    RUN chmod +x /data/host.sh ; /data/host.sh
    
    
    EXPOSE 80
    
    ENTRYPOINT ["/data/tomcat/bin/catalina.sh", "run" ]
     

    [root@docker jdk]# docker build -t senyint/tomcat:v1 .
    Start a container and test the Java environment variables
    [root@docker jdk]# docker run -d -p 11112:80 --name tomcat1 senyint/tomcat:v1
    [root@docker jdk]# docker exec -it tomcat1 /bin/bash
    [root@docker jdk]# java -version    (shows the Java version)
    [root@docker jdk]# mvn -version     (shows the maven version)

     

    3. Building the application images

    (1) Build jenkins

    jenkins Dockerfile
    [root@docker jenkins]# vim Dockerfile 
    
    
    

      FROM senyint/java1.8:latest

      MAINTAINER fengjian <fengjian@senyint.com.com>

      ENV JENKINS_HOME /data/jenkins_home

      ADD profile /etc/

      ADD jenkins.war /data/webserver/

      RUN unzip /data/webserver/jenkins.war -d /data/webserver && \
          rm /data/webserver/jenkins.war

      VOLUME /data/jenkins_home

    
    Build the jenkins image
    [root@docker jenkins]# docker build -t senyint/jenkins .
    
    Start jenkins and log in to the container
    [root@docker jenkins]# docker run -d -p 11111:80 -v /usr/bin/docker:/usr/bin/docker -v /var/run/docker.sock:/var/run/docker.sock -v /docker_project:/docker_project --name jenkins  senyint/jenkins

    -v /docker_project:/docker_project maps the /docker_project directory into the jenkins container. After the jenkins container has compiled the war package, it is copied with cp into /docker_project/<java project directory>/, and the Java project image is then built from the senyint/tomcat image.
    [root@docker jenkins]# docker exec -it jenkins /bin/bash

    Using docker commands.
    Below is the Jenkins configuration: docker compiles the im-web project, builds the im-web image, and pushes it to the registry.

    Download the Java project code from the git repository

    
    
    Build step executed during the Jenkins build:

    registry="docker.cinyi.com:443"
    # extract the project directory
    javadir=`echo $WORKSPACE | awk -F'/' '{print $5}'`
    # extract the war package name
    javaname=`ls $WORKSPACE/target/*war | awk -F'/' '{print $7}' | cut -d . -f 1`

    mkdir -p /data/docker_project/$javadir
    rm /data/docker_project/$javadir/$javaname.war -rf
    mv $WORKSPACE/target/$javaname.war /data/docker_project/$javadir

    # /data/docker_project contains a Dockerfile template; generate a new Dockerfile from it based on the war package name
    sed "s/jenkins/$javaname/g" /data/docker_project/Dockerfile >/data/docker_project/$javadir/Dockerfile

    if docker images | grep $javaname ; then
    docker rmi -f docker.cinyi.com:443/senyint/$javaname
    fi

    docker build -t docker.cinyi.com:443/senyint/$javaname /data/docker_project/$javadir/
    docker push docker.cinyi.com:443/senyint/$javaname


    # define the namespace as test:

    k8s_apicurl="curl --cacert /root/ca.pem"
    k8s_url="https://192.168.20.227:6443"

    # create the namespace
    if ! `$k8s_apicurl -H "Authorization: Bearer 199e9c8d4ce99c61" -X GET $k8s_url/api/v1/namespaces | grep test >/dev/null` ;then
    $k8s_apicurl -H "Authorization: Bearer 199e9c8d4ce99c61" -H "content-Type: application/yaml" -X POST $k8s_url/api/v1/namespaces -d "$(cat /root/namespaces.yaml)"
    fi

    # create the service
    if ! `$k8s_apicurl -H "Authorization: Bearer 199e9c8d4ce99c61" -X GET $k8s_url/api/v1/namespaces/test/services | grep "im-web" >/dev/null` ; then
    $k8s_apicurl -H "Authorization: Bearer 199e9c8d4ce99c61" -H "content-Type: application/yaml" -X POST $k8s_url/api/v1/namespaces/test/services -d "$(cat /root/im-web_service.yaml)"
    fi

    # create the deployment
    if ! `$k8s_apicurl -H "Authorization: Bearer 199e9c8d4ce99c61" -X GET $k8s_url/apis/extensions/v1beta1/namespaces/test/deployments | grep "im-web" >/dev/null` ; then
    $k8s_apicurl -H "Authorization: Bearer 199e9c8d4ce99c61" -H "content-Type: application/yaml" -X POST $k8s_url/apis/extensions/v1beta1/namespaces/test/deployments/ -d "$(cat /root/im-web_deployment.yaml)"
    fi

     Mount the docker binary and docker.sock directly into the container so it can build and package images.

    docker run -d -p 80:80 --restart=always -v /usr/bin/docker:/usr/bin/docker -v /var/run/docker.sock:/var/run/docker.sock -v /data/docker_project:/data/docker_project  -v /data/jenkins_home:/data/jenkins_home -v /etc/sysconfig/docker:/etc/sysconfig/docker  senyint/jenkins

    After logging in to the container, a docker-client error appears (docker version 1.12.6):

    [root@6882772021f0 /]# docker ps -a
    You don't have either docker-client or docker-client-latest installed. Please install either one and retry.

    Version 1.13 does not seem to have this problem; with 1.12.6, install docker-client inside the container:

    [root@6882772021f0 /]# yum -y install docker-client

    ###################################################################################

    Docker resource isolation uses Linux container (LXC) technology, mainly kernel namespaces.

    Kernel namespaces (resource isolation) are divided into the following (see the sketch after this list):
    1. PID: PID isolation; the container has its own process table and its own PID 1
    2. net: network isolation; the container has its own network information
    3. ipc: inter-process communication; IPC needs extra information to identify which namespace a process belongs to
    4. mnt: similar to chroot; each container has its own mount table
    5. uts: lets the container have its own hostname and domain
    6. user: the container can have its own users and groups
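
    These namespaces can be inspected from the host: every container process exposes its namespace handles under /proc/<pid>/ns. A small sketch, assuming a running container named base as in the earlier example:

    # find the host PID of the container's PID-1 process
    PID=$(docker inspect -f '{{.State.Pid}}' base)
    # list its namespace handles: ipc, mnt, net, pid, uts, (user)
    ls -l /proc/$PID/ns
    # compare with the host's own namespaces: the inode numbers differ for the namespaced resources
    ls -l /proc/1/ns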


    Docker network modes
    
    1. NAT (network address translation)
    
    2. Host 
    Create a host-mode container; it shares the host's network
    [root@docker data]# docker run -d --name centos7-host --net=host centos7.3:20170204


    3. Container mode (share another container's network)

    This mode suits containers that communicate with each other very frequently. Characteristics of container-mode networking:

    1. Isolated from the host's network namespace

    2. Containers share one network namespace

    3. Suitable when containers talk to each other a lot.

    [root@docker data]# docker run -d --name centos7-nat centos7.3:20170204    (NAT mode)
    [root@docker data]# docker run -d --name centos-container --net=container:centos7-nat  centos7.3:20170204

    The centos-container container gets the same IP address as centos7-nat.


    4. None: the container gets no network configuration; you can configure it yourself.
    [root@docker data]# docker run -d --name centos-none --net=none  centos7.3:20170204

    After logging in to the container there is no eth0.
    5. overlay

    Characteristics of overlay networking (see the sketch after this list):

    1. Cross-host communication

    2. No port mapping to manage

    3. No need to worry about IP conflicts
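
    Docker's built-in overlay driver (as opposed to the manual OpenvSwitch/GRE setup further below) needs a shared key-value store such as Consul, which is why Consul is introduced next. A sketch of that approach; the addresses are borrowed from this environment and the flags assume a docker-engine 1.12/1.13 era daemon:

    # on every host: point the daemon at a shared consul KV store
    # (options added to the dockerd command line in /usr/lib/systemd/system/docker.service)
    --cluster-store=consul://192.168.20.135:8500 --cluster-advertise=ens32:2376

    # on any one host: create the overlay network; it becomes visible on all hosts
    docker network create -d overlay --subnet=10.10.0.0/16 ov-net
    docker run -d --name web1 --net=ov-net centos7.3:20170204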

    Introduction to Consul

    Consul provides a service-discovery and configuration solution for distributed systems. It is implemented in Go and its source code is open. Consul also includes an implementation of a distributed consensus protocol, health checks, and a management UI.

    Consul agent: server and client modes

    Running the consul agent command starts a background daemon on every node of the consul cluster, in either server or client mode, exposing HTTP or DNS interfaces and handling health checks and service synchronization. A server-mode agent maintains the cluster state, answers RPC queries, and exchanges WAN gossip with other data centers. Client nodes are relatively stateless; their only activity is forwarding requests to the server nodes, which keeps latency and resource consumption low.
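
    A minimal sketch of starting the agent in both modes (the data directory, the single-server bootstrap and the address assignments are assumptions, not taken from this environment):

    # server mode: maintains cluster state and answers RPC queries
    consul agent -server -bootstrap-expect=1 -data-dir=/var/lib/consul -bind=192.168.20.135 -client=0.0.0.0 -ui

    # client mode: stateless, forwards requests to the servers
    consul agent -data-dir=/var/lib/consul -bind=192.168.20.209 -join=192.168.20.135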

    OpenvSwitch Docker configuration

    Test environment:
        ens32: 192.168.20.209    docker0: 10.0.1.1/24
        ens32: 192.168.20.135    docker0: 10.0.2.1/24
        ens32: 192.168.20.223    docker0: 10.0.3.1/24
        ens32: 192.168.20.224    docker0: 10.0.4.1/24

    Operations on the 192.168.20.209 server
    [root@docker ~]# rpm -ivh openvswitch-2.5.0-2.el7.x86_64.rpm
    warning: openvswitch-2.5.0-2.el7.x86_64.rpm: Header V4 DSA/SHA1 Signature, key ID fac8d3c0: NOKEY
    error: Failed dependencies:
    libatomic.so.1()(64bit) is needed by openvswitch-2.5.0-2.el7.x86_64

    [root@docker ~]# yum -y install libatomic


    [root@docker ~]# systemctl start openvswitch.service

    [root@docker ~]# systemctl status openvswitch.service
    openvswitch.service - Open vSwitch
    Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; disabled; vendor preset: disabled)
    Active: active (exited) since Sat 2017-02-11 10:00:11 CST; 17s ago
    Process: 3854 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
    Main PID: 3854 (code=exited, status=0/SUCCESS)

    Feb 11 10:00:11 docker systemd[1]: Starting Open vSwitch...
    Feb 11 10:00:11 docker systemd[1]: Started Open vSwitch.

    [root@docker ~]# yum -y install bridge-utils


    [root@docker ~]# brctl show
    bridge name bridge id STP enabled interfaces
    docker0 8000.0242fabe521c no

    # create a new bridge
    [root@docker ~]# ovs-vsctl add-br br0

    # add a GRE port to the bridge
    [root@docker ~]# ovs-vsctl add-br br0
    [root@docker ~]# ovs-vsctl add-port br0 gre0 -- set interface gre0 type=gre option:remote_ip=192.168.20.135


    The configuration now looks like this:
    [root@docker ~]# ovs-vsctl show
    6fde4aed-708a-4ecc-882a-a415b3b3ac3d
    Bridge "br0"
    Port "br0"
    Interface "br0"
    type: internal
    Port "gre0"
    Interface "gre0"
    type: gre
    options: {remote_ip="192.168.20.135"}
    ovs_version: "2.5.0"


    #[root@docker ~]# ovs-vsctl del-br br0


    # attach br0 to the local docker0 so container traffic leaves through the openvswitch tunnel
    [root@docker ~]# brctl addif docker0 br0
    [root@docker ~]# brctl show
    bridge name bridge id STP enabled interfaces
    docker0 8000.0242fabe521c no br0


    [root@docker ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
    valid_lft forever preferred_lft forever
    2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:50:56:84:42:8d brd ff:ff:ff:ff:ff:ff
    inet 192.168.20.209/24 brd 192.168.20.255 scope global ens32
    valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe84:428d/64 scope link
    valid_lft forever preferred_lft forever
    3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
    link/ether 02:42:fa:be:52:1c brd ff:ff:ff:ff:ff:ff
    inet 10.0.1.1/24 scope global docker0
    valid_lft forever preferred_lft forever
    inet6 fe80::42:faff:febe:521c/64 scope link
    valid_lft forever preferred_lft forever
    422: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN
    link/ether c6:82:46:70:bc:d1 brd ff:ff:ff:ff:ff:ff
    424: gre0@NONE: <NOARP> mtu 1476 qdisc noop state DOWN
    link/gre 0.0.0.0 brd 0.0.0.0
    425: gretap0@NONE: <BROADCAST,MULTICAST> mtu 1462 qdisc noop state DOWN qlen 1000
    link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
    426: br0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop master docker0 state DOWN
    link/ether 8e:63:55:ec:3b:41 brd ff:ff:ff:ff:ff:ff

    # bring up the docker0 and br0 interfaces
    [root@docker ~]# ip link set dev br0 up
    [root@docker ~]# ip link set dev docker0 up

    Add a route so that all traffic to 10.0.0.0/8 leaves via docker0
    [root@docker ~]# ip route add 10.0.0.0/8 dev docker0

    Start a container
    [root@docker ~]# docker images
    REPOSITORY TAG IMAGE ID CREATED SIZE
    centos7.3 20170204 1d6f132807d0 6 days ago 530 MB

    [root@docker ~]# docker run -d --name 209test centos7.3:20170204
    Log in to the container
    [root@docker ~]# docker exec -it 209test /bin/bash
    Check the IP address
    [root@464241f535e2 /]# ifconfig
    eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
    inet 10.0.1.2 netmask 255.255.255.0 broadcast 0.0.0.0
    inet6 fe80::42:aff:fe00:102 prefixlen 64 scopeid 0x20<link>
    ether 02:42:0a:00:01:02 txqueuelen 0 (Ethernet)
    RX packets 720 bytes 68496 (66.8 KiB)
    RX errors 0 dropped 0 overruns 0 frame 0
    TX packets 693 bytes 65706 (64.1 KiB)
    TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0

    lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
    inet 127.0.0.1 netmask 255.0.0.0
    inet6 ::1 prefixlen 128 scopeid 0x10<host>
    loop txqueuelen 0 (Local Loopback)
    RX packets 0 bytes 0 (0.0 B)
    RX errors 0 dropped 0 overruns 0 frame 0
    TX packets 0 bytes 0 (0.0 B)
    TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0

    # ping the IP address of a container on the 192.168.20.135 server
    [root@464241f535e2 /]# ping 10.0.2.2

    PING 10.0.2.2 (10.0.2.2) 56(84) bytes of data.
    64 bytes from 10.0.2.2: icmp_seq=1 ttl=63 time=0.451 ms
    64 bytes from 10.0.2.2: icmp_seq=2 ttl=63 time=0.493 ms
    From 10.0.1.1 icmp_seq=3 Redirect Host(New nexthop: 10.0.2.2)
    From 10.0.1.1: icmp_seq=3 Redirect Host(New nexthop: 10.0.2.2)

    The redirect messages above appear briefly and then everything returns to normal.


    ##############################################################################################


    Operations on the 192.168.20.135 server (the same steps are repeated on 223 and 224 below)
    [root@registry ~]# rpm -ivh openvswitch-2.5.0-2.el7.x86_64.rpm
    warning: openvswitch-2.5.0-2.el7.x86_64.rpm: Header V4 DSA/SHA1 Signature, key ID fac8d3c0: NOKEY
    error: Failed dependencies:
    libatomic.so.1()(64bit) is needed by openvswitch-2.5.0-2.el7.x86_64

    [root@registry ~]# yum -y install libatomic


    [root@registry ~]# systemctl start openvswitch.service

    [root@registry ~]# systemctl status openvswitch.service
    ● openvswitch.service - Open vSwitch
    Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; disabled; vendor preset: disabled)
    Active: active (exited) since Sat 2017-02-11 10:00:11 CST; 17s ago
    Process: 3854 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
    Main PID: 3854 (code=exited, status=0/SUCCESS)

    Feb 11 10:00:11 registry systemd[1]: Starting Open vSwitch...
    Feb 11 10:00:11 registry systemd[1]: Started Open vSwitch.

    [root@registry ~]# yum -y install bridge-utils


    [root@registry ~]# brctl show
    bridge name bridge id STP enabled interfaces
    docker0 8000.0242fabe521c no

    # create a new bridge
    [root@registry ~]# ovs-vsctl add-br br0

    # add a GRE port to the bridge
    [root@registry ~]# ovs-vsctl add-br br0
    [root@registry ~]# ovs-vsctl add-port br0 gre0 -- set interface gre0 type=gre option:remote_ip=192.168.20.209


    The ovs-vsctl show output differs from that on 192.168.20.209:
    [root@registry ~]# ovs-vsctl show
    19baf011-40aa-426c-a2b9-568101390834
    Bridge "br0"
    Port "gre0"
    Interface "gre0"
    type: gre
    options: {remote_ip="192.168.20.209"}
    Port "br0"
    Interface "br0"
    type: internal
    ovs_version: "2.5.0"

    #### to delete br0: [root@registry ~]# ovs-vsctl del-br br0


    # attach br0 to the local docker0 so container traffic leaves through the openvswitch tunnel
    [root@registry ~]# brctl addif docker0 br0
    [root@registry ~]# brctl show
    bridge name bridge id STP enabled interfaces
    docker0 8000.0242fabe521c no br0


    [root@registry ~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
    valid_lft forever preferred_lft forever
    2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:50:56:84:2b:fc brd ff:ff:ff:ff:ff:ff
    inet 192.168.20.135/24 brd 192.168.20.255 scope global ens32
    valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe84:2bfc/64 scope link
    valid_lft forever preferred_lft forever
    3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state DOWN
    link/ether 02:42:5e:5d:06:3f brd ff:ff:ff:ff:ff:ff
    inet 10.0.2.1/24 scope global docker0
    valid_lft forever preferred_lft forever
    inet6 fe80::42:5eff:fe5d:63f/64 scope link
    valid_lft forever preferred_lft forever
    28: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN
    link/ether f2:fd:f4:39:e2:20 brd ff:ff:ff:ff:ff:ff
    29: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state DOWN
    link/ether 0a:29:1e:93:37:41 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::829:1eff:fe93:3741/64 scope link
    valid_lft forever preferred_lft forever


    # bring up the docker0 and br0 interfaces
    [root@registry ~]# ip link set dev br0 up
    [root@registry ~]# ip link set dev docker0 up

    Add a route so that all traffic to 10.0.0.0/8 leaves via docker0
    [root@registry ~]# ip route add 10.0.0.0/8 dev docker0

    #########################################################################################

    Operations on the 192.168.20.223 server
    [root@docker223~]# rpm -ivh openvswitch-2.5.0-2.el7.x86_64.rpm 
    warning: openvswitch-2.5.0-2.el7.x86_64.rpm: Header V4 DSA/SHA1 Signature, key ID fac8d3c0: NOKEY
    error: Failed dependencies:
    libatomic.so.1()(64bit) is needed by openvswitch-2.5.0-2.el7.x86_64

    [root@docker223~]# yum -y install libatomic


    [root@docker223~]# systemctl start openvswitch.service

    [root@docker223~]# systemctl status openvswitch.service 
    ● openvswitch.service - Open vSwitch
    Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; disabled; vendor preset: disabled)
    Active: active (exited) since Sat 2017-02-11 10:00:11 CST; 17s ago
    Process: 3854 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
    Main PID: 3854 (code=exited, status=0/SUCCESS)

    Feb 11 10:00:11 docker223systemd[1]: Starting Open vSwitch...
    Feb 11 10:00:11 docker223systemd[1]: Started Open vSwitch.

    [root@docker223~]# yum -y install bridge-utils


    [root@docker223~]# brctl show
    bridge name bridge id STP enabled interfaces
    docker0 8000.0242fabe521c no

    # create a new bridge
    [root@docker223~]# ovs-vsctl add-br br0

    # add a GRE port to the bridge
    [root@docker223~]# ovs-vsctl add-br br0
    [root@docker223~]# ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre option:remote_ip=192.168.20.209


    The ovs-vsctl show output differs from that on 192.168.20.209:
    [root@docker223~]# ovs-vsctl show

    8256b14a-1da6-4781-b9aa-7c6612ce7ebf
    Bridge "br0"
    Port "gre1"
    Interface "gre1"
    type: gre
    options: {remote_ip="192.168.20.209"}
    Port "br0"
    Interface "br0"
    type: internal
    ovs_version: "2.5.0"

    #### to delete br0: [root@docker223~]# ovs-vsctl del-br br0


    # attach br0 to the local docker0 so container traffic leaves through the openvswitch tunnel
    [root@docker223~]# brctl addif docker0 br0
    [root@docker223~]# brctl show
    bridge name bridge id STP enabled interfaces
    docker0 8000.0242fabe521c no br0


    [root@docker223~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
    valid_lft forever preferred_lft forever
    2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:50:56:84:2b:fc brd ff:ff:ff:ff:ff:ff
    inet 192.168.20.223/24 brd 192.168.20.255 scope global ens32
    valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe84:2bfc/64 scope link 
    valid_lft forever preferred_lft forever
    3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state DOWN 
    link/ether 02:42:5e:5d:06:3f brd ff:ff:ff:ff:ff:ff
    inet 10.0.3.1/24 scope global docker0
    valid_lft forever preferred_lft forever
    inet6 fe80::42:5eff:fe5d:63f/64 scope link 
    valid_lft forever preferred_lft forever
    28: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN 
    link/ether f2:fd:f4:39:e2:20 brd ff:ff:ff:ff:ff:ff
    29: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state DOWN 
    link/ether 0a:29:1e:93:37:41 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::829:1eff:fe93:3741/64 scope link 
    valid_lft forever preferred_lft forever


    # bring up the docker0 and br0 interfaces
    [root@docker223~]# ip link set dev br0 up
    [root@docker223~]# ip link set dev docker0 up

    Add a route so that all traffic to 10.0.0.0/8 leaves via docker0
    [root@docker223~]# ip route add 10.0.0.0/8 dev docker0

    #########################################################################################

    Operations on the 192.168.20.224 server
    [root@docker224~]# rpm -ivh openvswitch-2.5.0-2.el7.x86_64.rpm 
    warning: openvswitch-2.5.0-2.el7.x86_64.rpm: Header V4 DSA/SHA1 Signature, key ID fac8d3c0: NOKEY
    error: Failed dependencies:
    libatomic.so.1()(64bit) is needed by openvswitch-2.5.0-2.el7.x86_64

    [root@docker224~]# yum -y install libatomic


    [root@docker224~]# systemctl start openvswitch.service

    [root@docker224~]# systemctl status openvswitch.service 
    ● openvswitch.service - Open vSwitch
    Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; disabled; vendor preset: disabled)
    Active: active (exited) since Sat 2017-02-11 10:00:11 CST; 17s ago
    Process: 3854 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
    Main PID: 3854 (code=exited, status=0/SUCCESS)

    Feb 11 10:00:11 docker223systemd[1]: Starting Open vSwitch...
    Feb 11 10:00:11 docker223systemd[1]: Started Open vSwitch.

    [root@docker223~]# yum -y install bridge-utils


    [root@docker224~]# brctl show
    bridge name bridge id STP enabled interfaces
    docker0 8000.0242fabe521c no

    # create a new bridge
    [root@docker224~]# ovs-vsctl add-br br0

    # add a GRE port to the bridge
    [root@docker224~]# ovs-vsctl add-br br0
    [root@docker224~]# ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre option:remote_ip=192.168.20.209


    The ovs-vsctl show output differs from that on 192.168.20.209:
    [root@docker224~]# ovs-vsctl show

    8256b14a-1da6-4781-b9aa-7c6612ce7ebf
    Bridge "br0"
    Port "gre2"
    Interface "gre2"
    type: gre
    options: {remote_ip="192.168.20.209"}
    Port "br0"
    Interface "br0"
    type: internal
    ovs_version: "2.5.0"

    #### to delete br0: [root@docker224~]# ovs-vsctl del-br br0


    # attach br0 to the local docker0 so container traffic leaves through the openvswitch tunnel
    [root@docker224~]# brctl addif docker0 br0
    [root@docker224~]# brctl show
    bridge name bridge id STP enabled interfaces
    docker0 8000.0242fabe521c no br0


    [root@docker224~]# ip a
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
    valid_lft forever preferred_lft forever
    2: ens32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:50:56:84:2b:fc brd ff:ff:ff:ff:ff:ff
    inet 192.168.20.224/24 brd 192.168.20.255 scope global ens32
    valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe84:2bfc/64 scope link 
    valid_lft forever preferred_lft forever
    3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state DOWN 
    link/ether 02:42:5e:5d:06:3f brd ff:ff:ff:ff:ff:ff
    inet 10.0.4.1/24 scope global docker0
    valid_lft forever preferred_lft forever
    inet6 fe80::42:5eff:fe5d:63f/64 scope link 
    valid_lft forever preferred_lft forever
    28: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN 
    link/ether f2:fd:f4:39:e2:20 brd ff:ff:ff:ff:ff:ff
    29: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state DOWN 
    link/ether 0a:29:1e:93:37:41 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::829:1eff:fe93:3741/64 scope link 
    valid_lft forever preferred_lft forever


    # bring up the docker0 and br0 interfaces
    [root@docker224~]# ip link set dev br0 up
    [root@docker224~]# ip link set dev docker0 up

    Add a route so that all traffic to 10.0.0.0/8 leaves via docker0
    [root@docker224~]# ip route add 10.0.0.0/8 dev docker0

     Summary: 192.168.20.209 adds gre0, gre1 and gre2 to br0 with the mapping below, while 192.168.20.135 adds gre0 to its br0, 192.168.20.223 adds gre1, and 192.168.20.224 adds gre2. After containers are started on each host, containers on the different subnets can ping one another.

      gre0  192.168.20.135
      gre1  192.168.20.223
      gre2  192.168.20.224

    Startup script on 192.168.20.209

    #!/bin/bash

    systemctl start openvswitch.service

    systemctl enable openvswitch.service

    ovs-vsctl add-br br0
    ovs-vsctl add-port br0 gre0 -- set interface gre0 type=gre option:remote_ip=192.168.20.135
    ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre option:remote_ip=192.168.20.223
    ovs-vsctl add-port br0 gre2 -- set interface gre2 type=gre option:remote_ip=192.168.20.224

    ovs-vsctl show

    brctl show

    brctl addif docker0 br0

    ip link set dev br0 up

    ip link set dev docker0 up

    ip route add 10.0.0.0/8 dev docker0

     Startup script on 192.168.20.135

    systemctl start openvswitch.service

    systemctl enable openvswitch.service

    ovs-vsctl add-br br0
    ovs-vsctl add-port br0 gre0 -- set interface gre0 type=gre option:remote_ip=192.168.20.209

    ovs-vsctl show

    brctl show

    brctl addif docker0 br0

    ip link set dev br0 up

    ip link set dev docker0 up

    ip route add 10.0.0.0/8 dev docker0

     Startup script on 192.168.20.223

    systemctl start openvswitch.service

    systemctl enable openvswitch.service

    ovs-vsctl add-br br0
    ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre option:remote_ip=192.168.20.209

    ovs-vsctl show

    brctl show

    brctl addif docker0 br0

    ip link set dev br0 up

    ip link set dev docker0 up

    ip route add 10.0.0.0/8 dev docker0

     Startup script on 192.168.20.224

    systemctl start openvswitch.service

    systemctl enable openvswitch.service

    ovs-vsctl add-br br0
    ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre option:remote_ip=192.168.20.209

    ovs-vsctl show

    brctl show

    brctl addif docker0 br0

    ip link set dev br0 up

    ip link set dev docker0 up

    ip route add 10.0.0.0/8 dev docker0


