zoukankan      html  css  js  c++  java
  • 【工作相关】个人常用脚本及代码

    1. CMD

    right_cmd_command_regit.bat

    @echo off
    rem Register a right-click "DOS_COMMAND" entry that opens cmd.exe in the
    rem clicked location, for both generic items (*) and folders.
    rem FIX: the backslashes in the registry key paths were lost when this
    rem snippet was pasted ("HKCR*shellms-dos"); restored below.
    rem %%1 escapes to %1 inside a .bat, so the stored command is "cmd.exe /k cd %1".
    reg add "HKCR\*\shell\ms-dos" /ve /d DOS_COMMAND /f
    reg add "HKCR\*\shell\ms-dos\command" /ve /d "cmd.exe /k cd %%1" /f
    reg add "HKCR\Folder\shell\ms-dos" /ve /d DOS_COMMAND /f
    reg add "HKCR\Folder\shell\ms-dos\command" /ve /d "cmd.exe /k cd %%1" /f
    View Code

    2.Shell

    2.1 pmdownload

    #!/bin/sh
    
    # -------------------------------------------------------------------------------
    # pmdownload -
    #       Sample PM download script: pulls PM CSV files with getpm.sh and
    #       uploads them to an FTP server with cftp.
    #
    # Note: The output is written to file "pmdownload.out"
    
    # FTP parameters (replace the placeholders before use)
    #
    # Example:
    #   hostName=192.168.1.1
    #   userName=user1
    #   password=123
    #   remoteDir=/home/user1/pmfiles
    
    hostName=ftp_hostname
    userName=ftp_username
    password=ftp_password
    remoteDir=ftp_remote_directory
    csvSeparator=","
    
    # -------------------------------------------------------------------------------
    # Resolve the CMS bin directory (where this script lives) and cd into it,
    # so every relative path below lands there.
    # FIX: quote all expansions so paths with spaces don't break dirname/test,
    # and stop if the cd fails instead of running in the wrong directory.
    
    pmcmd=$0
    
    cmsbin=`dirname "$pmcmd"`
    
    if [ "$pmcmd" = "$cmsbin" ]; then
        cmsbin="."
    fi
    cd "$cmsbin" || exit 1
    
    # -------------------------------------------------------------------------------
    # Build date strings for yesterday (15-MIN data) and the day before
    # yesterday (1-DAY data), in MM/DD/YY form.
    
    year=`date +%Y`
    month=`date +%m`
    day=`date +%d`
    
    day=`expr "$day" - 1`
    
    # Yesterday falls in the previous month (possibly the previous year):
    # step the month back and take that month's last day from cal(1).
    if [ "$day" -eq 0 ]; then
        month=`expr "$month" - 1`
        if [ "$month" -eq 0 ]; then
            month=12
            year=`expr "$year" - 1`
        fi
        # FIX: pad only when needed -- the original prepended "0"
        # unconditionally, turning November (10) into "010".
        if [ "$month" -lt 10 ]; then
            month="0${month}"
        fi
        day=`cal $month $year | grep . | fmt -1 | tail -1`
    fi
    
    if [ "$day" -lt 10 ]; then
        day="0${day}"
    fi
    
    year=`echo ${year} | sed 's/^20//'`
    
    # -------------------------------------------------------------------------------
    # Same computation for the day before yesterday
    
    year2=`date +%Y`
    month2=`date +%m`
    day2=`date +%d`
    
    day2=`expr "$day2" - 2`
    
    # FIX: day2 can be 0 (today is the 2nd) or -1 (today is the 1st); both
    # fall in the previous month, offset from its last day.  The original
    # handled only the 0 case and produced "0-1" on the 1st of a month.
    if [ "$day2" -le 0 ]; then
        month2=`expr "$month2" - 1`
        if [ "$month2" -eq 0 ]; then
            month2=12
            year2=`expr "$year2" - 1`
        fi
        if [ "$month2" -lt 10 ]; then
            month2="0${month2}"
        fi
        lastday=`cal $month2 $year2 | grep . | fmt -1 | tail -1`
        day2=`expr "$lastday" + "$day2"`
    fi
    
    if [ "$day2" -lt 10 ]; then
        day2="0${day2}"
    fi
    
    year2=`echo ${year2} | sed 's/^20//'`
    
    # --------------------------------------
    # yesterdayDate / todayDate / beforeYesterdayDate format -> MM/DD/YY
    
    yesterdayDate=${month}/${day}/${year}
    
    todayDate=`date +'%m/%d/%y'`
    
    beforeYesterdayDate=${month2}/${day2}/${year2}
    
    # --------------------------------------
    # 15-MIN and 1-DAY query windows for getpm.sh
    
    startTime15min="${yesterdayDate} 00:00:00"
    endTime15min="${yesterdayDate} 23:59:59"
    
    startTime1day="${beforeYesterdayDate} 00:00:00"
    endTime1day="${beforeYesterdayDate} 23:59:59"
    
    # --------------------------------------
    # File-name date stamps -> MMDDYY
    
    fileDate=${month}${day}${year}
    
    fileDate2=${month2}${day2}${year2}
    
    # --------------------------------------
    # Hostname, used as the prefix of uploaded file names
    
    file_hostname=`hostname`
    
    # -------------------------------------------------------------------------------
    # Output files
    
    # Rotate pmdownload.out once it exceeds 300 KB, keeping up to five
    # generations (pmdownload.out1 = newest ... pmdownload.out5 = oldest).
    outputFiles()
    {
        filesize=0
        if [ -f pmdownload.out ]; then
            filesize=`du -k pmdownload.out | awk '{print $1}'`
            if [ "$filesize" -gt 300 ]; then
                # Shift existing generations up one slot.  mv replaces the
                # original cp+rm pairs: same end state, without the extra copy.
                if [ -f pmdownload.out4 ]; then
                    mv -f pmdownload.out4 pmdownload.out5
                fi
                if [ -f pmdownload.out3 ]; then
                    mv -f pmdownload.out3 pmdownload.out4
                fi
                if [ -f pmdownload.out2 ]; then
                    mv -f pmdownload.out2 pmdownload.out3
                fi
                if [ -f pmdownload.out1 ]; then
                    mv -f pmdownload.out1 pmdownload.out2
                fi
                mv -f pmdownload.out pmdownload.out1
            fi
        fi
    }
    
    # ------------------------------------------------------------------------------
    # Get 15-MIN PM data
    
    # Retrieve 15-MIN PM data with getpm.sh, de-duplicate it and upload it via cftp.
    #   $1 - PM type, e.g. "DS315MIN" (the "15MIN" substring is stripped for names)
    #   $2 - optional location: "" = both ends, NEND = near end, FEND = far end
    getpm_15MIN()
    {
        pmtype=$1
        pmtype1=`echo $pmtype | sed 's/15MIN//'`
        pmlocation=$2
    
        # Map the location keyword to getpm.sh's -l argument.  Anything else
        # (including empty) means both ends; the original left pmloc holding a
        # stale value from the previous call for unknown keywords.
        case "$pmlocation" in
            NEND) pmloc="2" ;;
            FEND) pmloc="1" ;;
            *)    pmloc="BOTH" ;;
        esac
    
        outputFiles
        # ----------------------------------------------------
        # Execute getpm to retrieve 15-MIN PM data
    
        timeStamp=`date +'%m%d%y_%H%M%S'`
        timenow=`date +'%m/%d/%y %H:%M:%S'`
    
        filenamePM="pm_${pmtype1}_15MIN_${timeStamp}.csv"
    
        # Printable copy of the command, for the log only.
        pmcmd="${cmsbin}/getpm.sh -p $pmtype"
        pmcmd="${pmcmd} -s ${startTime15min} -e ${endTime15min}"
        if [ "$pmloc" != "BOTH" ]; then
            pmcmd="${pmcmd} -l $pmloc"
        fi
        pmcmd="${pmcmd} -f csv -o $filenamePM -rs $csvSeparator"
    
        echo "">>pmdownload.out
        echo "# ---------------------------------------------------">>pmdownload.out
        echo "# Execute getpm $1 15-MIN at ${timenow}">>pmdownload.out
        echo "# ---------------------------------------------------">>pmdownload.out
        echo "">>pmdownload.out
        echo ">>${pmcmd}" >>pmdownload.out
    
        # FIX: these invocations were split over two lines without a trailing
        # backslash, so the "-f csv ..." tail ran as a separate broken command.
        if [ "$pmloc" = "BOTH" ]; then
            ${cmsbin}/getpm.sh -p $pmtype -s "${startTime15min}" -e "${endTime15min}" \
                -f csv -o $filenamePM -rs $csvSeparator >>pmdownload.out
        else
            ${cmsbin}/getpm.sh -p $pmtype -s "${startTime15min}" -e "${endTime15min}" \
                -l $pmloc -f csv -o $filenamePM -rs $csvSeparator >>pmdownload.out
        fi
    
        # ----------------------------------------------------
        # Rename, de-duplicate and transfer the 15-MIN output file via cftp
    
        if [ -f $filenamePM ]; then
            filenameFTP="${file_hostname}_pm_${pmtype1}_15MIN_${fileDate}_000000.csv"
            echo "">>pmdownload.out
            echo ">>Rename file ${filenamePM} to ${filenameFTP}">>pmdownload.out
            cp ${filenamePM} ${filenameFTP}
            rm ${filenamePM}
    
            localFile="${cmsbin}/${filenameFTP}"
            newlocalFile="newlocalFile"
    
            # Keep the CSV header line, then append the data rows with
            # duplicates removed.  tac|awk|tac keeps the LAST occurrence of
            # each key; the key columns differ per facility type.
            awk "NR==1" ${localFile}>${newlocalFile}
            case "$pmtype1" in
                GEPort15Min|10GEPort15Min|EthDsl15Min|PON15Min)
                    tac ${localFile} | awk -F$csvSeparator '!_[$3,$6,$10]++' | tac >>${newlocalFile}
                    localFile="${newlocalFile}"
                    ;;
                DSL15Min)
                    tac ${localFile} | awk -F$csvSeparator '!_[$3,$43,$45]++' | tac >>${newlocalFile}
                    localFile="${newlocalFile}"
                    ;;
                ERPS15Min)
                    tac ${localFile} | awk -F$csvSeparator '!_[$3,$6,$27]++' | tac >>${newlocalFile}
                    localFile="${newlocalFile}"
                    ;;
                ONT15Min|OntDs115Min|ONTPWE315Min)
                    tac ${localFile} | awk -F$csvSeparator '!_[$3,$5,$8]++' | tac >>${newlocalFile}
                    localFile="${newlocalFile}"
                    ;;
                OntEthFe15Min|OntEthGe15Min|OntEthHpna15Min)
                    tac ${localFile} | awk -F$csvSeparator '!_[$3,$6,$9]++' | tac >>${newlocalFile}
                    localFile="${newlocalFile}"
                    ;;
            esac
    
            remoteFile="${remoteDir}/${filenameFTP}"
            ftpcmd="${cmsbin}/cftp -h $hostName -u $userName -p $password"
            ftpcmd="${ftpcmd} $localFile $remoteFile"
    
            timenow=`date +'%m/%d/%y %H:%M:%S'`
            echo "">>pmdownload.out
            echo "# ---------------------------------------------------">>pmdownload.out
            echo "# Transfer $1 15-MIN file at ${timenow}">>pmdownload.out
            echo "# ---------------------------------------------------">>pmdownload.out
            echo "">>pmdownload.out
            echo ">>${ftpcmd}" >>pmdownload.out
    
            ${ftpcmd} >>pmdownload.out
            # NOTE(review): when de-duplication ran, localFile points at
            # newlocalFile, so the renamed ${filenameFTP} copy is left on
            # disk -- same as the original; confirm whether that is intended.
            rm -f $localFile
            rm -f $newlocalFile
        fi
    }
    
    # ------------------------------------------------------------------------------
    # Get both 1-DAY PM data
    
    # Retrieve 1-DAY PM data with getpm.sh and upload it via cftp.
    #   $1 - PM type, e.g. "DS31DAY" (the "1DAY" substring is stripped for names)
    #   $2 - optional location: "" = both ends, NEND = near end, FEND = far end
    getpm_1DAY()
    {
        pmtype=$1
        pmtype1=`echo $pmtype | sed 's/1DAY//'`
        pmlocation=$2
    
        # Map the location keyword to getpm.sh's -l argument (default: both ends).
        case "$pmlocation" in
            NEND) pmloc="2" ;;
            FEND) pmloc="1" ;;
            *)    pmloc="BOTH" ;;
        esac
    
        outputFiles
        # ----------------------------------------------------
        # Execute getpm to retrieve 1-DAY PM data
    
        timeStamp=`date +'%m%d%y_%H%M%S'`
        timenow=`date +'%m/%d/%y %H:%M:%S'`
    
        filenamePM="pm_${pmtype1}_1DAY_${timeStamp}.csv"
    
        # Printable copy of the command, for the log only.
        pmcmd="${cmsbin}/getpm.sh -p $pmtype"
        pmcmd="${pmcmd} -s ${startTime1day} -e ${endTime1day}"
        if [ "$pmloc" != "BOTH" ]; then
            pmcmd="${pmcmd} -l $pmloc"
        fi
        pmcmd="${pmcmd} -f csv -o $filenamePM -rs $csvSeparator"
    
        echo "">>pmdownload.out
        echo "# ---------------------------------------------------">>pmdownload.out
        echo "# Execute getpm $1 1-DAY at ${timenow}">>pmdownload.out
        echo "# ---------------------------------------------------">>pmdownload.out
        echo "">>pmdownload.out
        echo ">>${pmcmd}" >>pmdownload.out
    
        # FIX: these invocations were split over two lines without a trailing
        # backslash, so the "-f csv ..." tail ran as a separate broken command.
        if [ "$pmloc" = "BOTH" ]; then
            ${cmsbin}/getpm.sh -p $pmtype -s "${startTime1day}" -e "${endTime1day}" \
                -f csv -o $filenamePM -rs $csvSeparator >>pmdownload.out
        else
            ${cmsbin}/getpm.sh -p $pmtype -s "${startTime1day}" -e "${endTime1day}" \
                -l $pmloc -f csv -o $filenamePM -rs $csvSeparator >>pmdownload.out
        fi
    
        # ----------------------------------------------------
        # Transfer the 1-DAY PM output file using the cftp script
    
        if [ -f $filenamePM ]; then
            filenameFTP="${file_hostname}_pm_${pmtype1}_1DAY_${fileDate2}_000000.csv"
            echo "">>pmdownload.out
            echo ">>Rename file ${filenamePM} to ${filenameFTP}">>pmdownload.out
            cp ${filenamePM} ${filenameFTP}
            rm ${filenamePM}
    
            localFile="${cmsbin}/${filenameFTP}"
            remoteFile="${remoteDir}/${filenameFTP}"
            ftpcmd="${cmsbin}/cftp -h $hostName -u $userName -p $password"
            ftpcmd="${ftpcmd} $localFile $remoteFile"
    
            timenow=`date +'%m/%d/%y %H:%M:%S'`
            echo "">>pmdownload.out
            echo "# ---------------------------------------------------">>pmdownload.out
            echo "# Transfer $1 1-DAY file at ${timenow}">>pmdownload.out
            echo "# ---------------------------------------------------">>pmdownload.out
            echo "">>pmdownload.out
            echo ">>${ftpcmd}" >>pmdownload.out
    
            ${ftpcmd} >>pmdownload.out
            rm -f $localFile
        fi
    }
    
    
    # Retrieve call-status PM data (15-MIN window) and upload it via cftp.
    #   $1 - PM type, e.g. "ADSLCALLSTATUS" (used verbatim in file names)
    #   $2 - optional location: "" = both ends, NEND = near end, FEND = far end
    getpm_adsl()
    {
        pmtype=$1
        pmlocation=$2
    
        # Map the location keyword to getpm.sh's -l argument (default: both ends).
        case "$pmlocation" in
            NEND) pmloc="2" ;;
            FEND) pmloc="1" ;;
            *)    pmloc="BOTH" ;;
        esac
    
        outputFiles
        # ----------------------------------------------------
        # Execute getpm to retrieve 15-MIN PM data
    
        timeStamp=`date +'%m%d%y_%H%M%S'`
        timenow=`date +'%m/%d/%y %H:%M:%S'`
    
        filenamePM="pm_${pmtype}_${timeStamp}.csv"
        echo " FilenamePM = " $filenamePM
    
        # Printable copy of the command, for the log only.
        pmcmd="${cmsbin}/getpm.sh -p $pmtype"
        pmcmd="${pmcmd} -s ${startTime15min} -e ${endTime15min}"
        if [ "$pmloc" != "BOTH" ]; then
            pmcmd="${pmcmd} -l $pmloc"
        fi
        pmcmd="${pmcmd} -f csv -o $filenamePM -rs $csvSeparator"
    
        echo "">>pmdownload.out
        echo "# ---------------------------------------------------">>pmdownload.out
        echo "# Execute getpm $1 15-MIN at ${timenow}">>pmdownload.out
        echo "# ---------------------------------------------------">>pmdownload.out
        echo "">>pmdownload.out
        echo ">>${pmcmd}" >>pmdownload.out
    
        # FIX: these invocations were split over two lines without a trailing
        # backslash, so the "-f csv ..." tail ran as a separate broken command.
        if [ "$pmloc" = "BOTH" ]; then
            ${cmsbin}/getpm.sh -p $pmtype -s "${startTime15min}" -e "${endTime15min}" \
                -f csv -o $filenamePM -rs $csvSeparator >>pmdownload.out
        else
            ${cmsbin}/getpm.sh -p $pmtype -s "${startTime15min}" -e "${endTime15min}" \
                -l $pmloc -f csv -o $filenamePM -rs $csvSeparator >>pmdownload.out
        fi
    
        # ----------------------------------------------------
        # Transfer the 15-MIN PM output file using the cftp script
    
        if [ -f $filenamePM ]; then
            # FIX: the original interpolated ${pmtype1}, which this function
            # never sets (a stale value leaked from a previous getpm_15MIN
            # call); the file must be named after this function's ${pmtype}.
            filenameFTP="${file_hostname}_pm_${pmtype}_15MIN_${fileDate}_000000.csv"
            echo "">>pmdownload.out
            echo ">>Rename file ${filenamePM} to ${filenameFTP}">>pmdownload.out
            cp ${filenamePM} ${filenameFTP}
            rm ${filenamePM}
            localFile="${cmsbin}/${filenameFTP}"
            remoteFile="${remoteDir}/${filenameFTP}"
            ftpcmd="${cmsbin}/cftp -h $hostName -u $userName -p $password"
            ftpcmd="${ftpcmd} $localFile $remoteFile"
    
            timenow=`date +'%m/%d/%y %H:%M:%S'`
            echo "">>pmdownload.out
            echo "# ---------------------------------------------------">>pmdownload.out
            echo "# Transfer $1 15-MIN file at ${timenow}">>pmdownload.out
            echo "# ---------------------------------------------------">>pmdownload.out
            echo "">>pmdownload.out
            echo ">>${ftpcmd}" >>pmdownload.out
    
            ${ftpcmd} >>pmdownload.out
            rm -f $localFile
        fi
    }
    # -------------------------------------------------------------------------------
    # Download PM data
    
    # Download 1-DAY PM for one facility type ($1), both ends in one file.
    getpmdata1Day()
    {
        # FIX: quote $1 so a PM type is passed through as a single argument.
        getpm_1DAY "$1"
    
        # To download a single end instead, use one of these:
        #getpm_1DAY "$1" NEND
        #getpm_1DAY "$1" FEND
    }
    
    # Download call-status PM for one facility type ($1), both ends in one file.
    getpmdataadsl()
    {
        # FIX: quote $1 so a PM type is passed through as a single argument.
        getpm_adsl "$1"
    
        # To download a single end instead, use one of these:
        #getpm_adsl "$1" NEND
        #getpm_adsl "$1" FEND
    }
    
    # Download 15-MIN PM for one facility type ($1), both ends in one file.
    getpmdata15Min()
    {
        # FIX: quote $1 so a PM type is passed through as a single argument.
        getpm_15MIN "$1"
    
        # To download a single end instead, use one of these:
        #getpm_15MIN "$1" NEND
        #getpm_15MIN "$1" FEND
    }
    
    # -------------------------------------------------------------------------------
    # Start downloading PM data from each facility type.
    # To disable a download, add a "#" character before the "getpmdata" line.
    
    # 1-DAY counters (window: day before yesterday, see startTime1day/endTime1day)
    getpmdata1Day OC481DAY
    getpmdata1Day OC121DAY
    getpmdata1Day OC31DAY
    getpmdata1Day STS48c1DAY
    getpmdata1Day STS12c1DAY
    getpmdata1Day STS3c1DAY
    getpmdata1Day STS11DAY
    getpmdata1Day DS31DAY
    getpmdata1Day DS11DAY
    getpmdata1Day IMA1DAY
    getpmdata1Day IMALink1DAY
    getpmdata1Day ADSL1DAY
    getpmdata1Day HDSL1DAY
    getpmdata1Day XDSL1DAY
    getpmdata1Day XDSLGroup1DAY
    
    # 15-MIN counters (window: yesterday, see startTime15min/endTime15min)
    getpmdata15Min OC4815MIN
    getpmdata15Min OC1215MIN
    getpmdata15Min OC315MIN
    getpmdata15Min STS48c15MIN
    getpmdata15Min STS12c15MIN
    getpmdata15Min STS3c15MIN
    getpmdata15Min STS115MIN
    getpmdata15Min DS315MIN
    getpmdata15Min DS115MIN
    getpmdata15Min IMA15MIN
    getpmdata15Min IMALink15MIN
    getpmdata15Min ADSL15MIN
    getpmdata15Min HDSL15MIN
    getpmdata15Min XDSL15MIN
    getpmdata15Min XDSLGroup15MIN
    
    ## By default the following are 15 Min Data only
    getpmdata15Min ETHERNET
    getpmdataadsl ADSLCALLSTATUS
    getpmdataadsl XDSLCALLSTATUS
    getpmdataadsl XDSLGROUPCALLSTATUS
    
    #------------------------------------------
    ## PM for E5312/E5400/E7 platforms
    
    getpmdata1Day GEPort1Day
    getpmdata1Day 10GEPort1Day
    getpmdata1Day ERPS1Day
    getpmdata1Day OntEthGe1Day
    getpmdata1Day OntEthFe1Day
    getpmdata1Day OntEthHpna1Day
    getpmdata1Day OntDs11Day
    getpmdata1Day ONT1Day
    getpmdata1Day ONTPWE31Day
    getpmdata1Day EthDsl1Day
    getpmdata1Day DSL1Day
    getpmdata1Day PON1Day
    
    getpmdata15Min GEPort15Min
    getpmdata15Min 10GEPort15Min
    getpmdata15Min ERPS15Min
    getpmdata15Min OntEthGe15Min
    getpmdata15Min OntEthFe15Min
    getpmdata15Min OntEthHpna15Min
    getpmdata15Min OntDs115Min
    getpmdata15Min ONT15Min
    getpmdata15Min ONTPWE315Min
    getpmdata15Min EthDsl15Min
    getpmdata15Min DSL15Min
    getpmdata15Min PON15Min
    View Code

    2.2 login.expect

    #!/usr/bin/expect
    # Usage: login.expect <host> <user> <password> <prompt>
    # Opens an interactive ssh session on port 1035, automatically answering
    # the host-key confirmation and the password prompt, then hands the
    # session over to the user.
    set host [lindex $argv 0]
    set user [lindex $argv 1]
    set password [lindex $argv 2]
    set prompt [lindex $argv 3]
    set timeout 10
    spawn ssh -p 1035 $user@$host
    expect {
        # FIX: the original embedded literal newlines inside the send strings
        # (a paste artifact); use an explicit carriage return instead.
        "*yes/no*"  { send "yes\r"; exp_continue }
        "*assword:" { send "$password\r" }
    }
    interact
    View Code

    2.3 fulfill pmgcs data

    2.3.1 daily_pm_check_missing_data_and_recover.sh

    #!/bin/bash
    # Query Redshift for PM records missing between start_date ($1, default
    # 7 days ago) and today; the rows land in result.txt.
    # NOTE(review): this block duplicates check_pm_missing_data.sh (2.3.2);
    # it appears under the 2.3.1 heading by mistake in the original notes.
    REDSHIFT_HOST="xxx"
    REDSHIFT_PORT=5439
    REDSHIFT_DB="xxx"
    REDSHIFT_USER="xxx"
    REDSHIFT_PSWD="xxx"
    start_date=$1
    # FIX: quote $start_date -- unquoted, [ -z ] misparses multi-word input.
    if [ -z "$start_date" ];then
       start_date=`date --date="-7 day" +%Y-%m-%d`
    fi
    end_date=`date +%Y-%m-%d`
    start_time=$(date -d "$start_date" +%s)
    end_time=$(date -d "$end_date" +%s)
    echo "start_date: $start_date end_date:$end_date"
    echo "start_time: $start_time end_time: $end_time"
    
    # Instantiate the SQL template with the epoch bounds.
    # FIX: plain -f is enough for a regular file; -r was unnecessary.
    rm -f missing_pm.sql
    cp missing_pm.sql.tmp missing_pm.sql
    sed -i "s/START_TIME/${start_time}/g" missing_pm.sql
    sed -i "s/END_TIME/${end_time}/g" missing_pm.sql
    
    export PGPASSWORD=$REDSHIFT_PSWD
    psql -h $REDSHIFT_HOST -p $REDSHIFT_PORT -d $REDSHIFT_DB -U $REDSHIFT_USER -f missing_pm.sql >result.txt
    
    (An accidentally pasted terminal transcript was removed here; the actual
    content of daily_pm_check_missing_data_and_recover.sh follows.)
    #!/bin/bash
    # Daily job: find PM files missing from Redshift and re-sync them from S3,
    # then email a per-org summary.  A lock file prevents overlapping runs and
    # all progress is tee'd to a dated log.
    BASEDIR=$(dirname "$0")
    log="$BASEDIR/daily_pm_check_missing_data_and_recover.`date +'%Y_%m_%d'`.log"
    cd "$BASEDIR" || exit 1
    if [ -f /tmp/daily_pm_check_missing_data_and_recover.lock ];then
       # FIX: message said "will exist" instead of "will exit".
       echo "$0 is in process , will exit"|tee -a "$log"
       exit
    fi
    touch /tmp/daily_pm_check_missing_data_and_recover.lock
    echo "[`date +'%Y-%m-%d %H:%M:%S'`] Begin to load missing devices"|tee -a "$log"
    ./check_pm_missing_data.sh|tee -a "$log"
    # Split result.txt into one work file per org id (numeric first column only;
    # the [ "$org" -eq "$org" ] trick rejects non-numeric header/footer tokens).
    for org in `cat result.txt|awk '{print $1}'|sort|uniq`
    do
      if [ -n "$org" ] && [ "$org" -eq "$org" ] 2>/dev/null; then
        echo "$org need process"
        cat result.txt|grep $org>_result_${org}
      fi
    done
    # Org 50 is excluded from recovery.  FIX: -f so a missing file is not an error.
    rm -f _result_50
    echo "[`date +'%Y-%m-%d %H:%M:%S'`] Begin to process missing devices"|tee -a "$log"
    for s in `ls _result_*`
    do
       # Throttle: keep at most 4 pm_missing_data_move.py workers running.
       while [ `ps -ef|grep pm_missing_data_move.py|grep -v grep|wc -l` -gt 3 ]
       do
          sleep 1s
       done
       if [ -f "$s" ];then
         # Skip files already being processed by a running worker.
         if [ `ps -ef|grep -v grep|grep $s|wc -l` -eq 0 ];then
             mv "$s" "run_$s"
             nohup python pm_missing_data_move.py -e tony.ben@calix.com --password xxx --filename "run_$s" && mv "run_$s" "done_${s}" &
         fi
       fi
    done
    
    echo "[`date +'%Y-%m-%d %H:%M:%S'`] End"|tee -a "$log"
    rm -f result.txt.bak
    mv result.txt "result.txt.`date +'%Y_%m_%d'`"
    # Build the email body: extract "org file date" triples from the match
    # lines of today's log, strip the label noise, then count files per org.
    log_file="s3_daily_pm_check_missing_data_`date +'%Y_%m_%d'`.log"
    cat ${log_file}*|grep match|awk '{print $5,$7,$6}'>tmp.log
    sed -i 's/orgid//g' tmp.log
    sed -i 's/file_name//g' tmp.log
    sed -i 's/date_time//g' tmp.log
    sed -i 's/://g' tmp.log
    parm="fullsync"
    if [ -z "$1" ];then
       parm="delta"
    fi
    subject="[`date +'%Y-%m-%d %H:%M:%S'`] [$parm] Daily PM Files RE-SYNC STATICS"
    cat tmp.log |awk '{print $1}'|sort|uniq -c |sort -n>tmp
    body=""
    while IFS= read -r line
    do
      body="$body<li>$line</li>"
    done < tmp
    python sendEmail.py -e tony.ben@calix.com -s "$subject" -b "$body" --password xxx
    mv tmp.log "tmp.log.`date +'%Y_%m_%d'`"
    tar -zcvf "s3_daily_pm_check_missing_data_`date +'%Y_%m_%d_%H_%M'`.log.tar.gz" ${log_file}*
    rm -f ${log_file}*
    rm -f /tmp/daily_pm_check_missing_data_and_recover.lock
    View Code

    2.3.2 check_pm_missing_data.sh

    #!/bin/bash
    # Query Redshift for PM records missing between start_date ($1, default
    # 7 days ago) and today; rows are written to result.txt for the daily
    # recover job to consume.
    REDSHIFT_HOST="xx"
    REDSHIFT_PORT=5439
    REDSHIFT_DB="xx"
    REDSHIFT_USER="masteruser"
    # FIX: the closing quote was missing, which made the assignment swallow
    # the following lines and broke the whole script.
    REDSHIFT_PSWD="xx"
    start_date=$1
    # FIX: quote $start_date -- unquoted, [ -z ] misparses multi-word input.
    if [ -z "$start_date" ];then
       start_date=`date --date="-7 day" +%Y-%m-%d`
    fi
    end_date=`date +%Y-%m-%d`
    start_time=$(date -d "$start_date" +%s)
    end_time=$(date -d "$end_date" +%s)
    echo "start_date: $start_date end_date:$end_date"
    echo "start_time: $start_time end_time: $end_time"
    
    # Instantiate the SQL template with the epoch bounds.
    rm -f missing_pm.sql
    cp missing_pm.sql.tmp missing_pm.sql
    sed -i "s/START_TIME/${start_time}/g" missing_pm.sql
    sed -i "s/END_TIME/${end_time}/g" missing_pm.sql
    
    export PGPASSWORD=$REDSHIFT_PSWD
    psql -h $REDSHIFT_HOST -p $REDSHIFT_PORT -d $REDSHIFT_DB -U $REDSHIFT_USER -f missing_pm.sql >result.txt
    View Code

    2.3.3 fulfill_data.sh

    #!/bin/bash
    # fulfill_data.sh <orgid> <device> <datelist> <process_file> <thread>
    # Move one device's PM files whose embedded timestamp falls on a missing
    # date out of the backup/dupcheck staging areas back into the live bucket.
    orgid=$1
    device=$2
    datelist=$3
    process_file=$4
    thread=$5
    bucket_prefix="s3://gcs.calix.com-wifi-pm-per-"
    log_file="s3_daily_pm_check_missing_data_`date +'%Y_%m_%d'`.log"
    # FIX: the original used [ -x $var ] (the executable-file test) to detect
    # missing arguments; [ -z "$var" ] is the empty-string test intended here.
    if [ -z "$orgid" ];then
       echo "No OrgID"
       exit
    fi
    if [ -z "$device" ];then
       echo "No Device"
       exit
    fi
    for bucket in radio sta
    do
       for folder in backup
       do
          # List every staged file for this org/device; file names look like
          # <device>_<epoch>.csv, so field 2 of the name is the timestamp.
          for f in `aws s3 ls ${bucket_prefix}$bucket/$folder/$orgid/$device|awk '{print $4}'`
          do
             fnocsv=${f%%.csv}
             cdevice=`echo $fnocsv|awk -F '_' '{print $1}'`
             time_long=`echo $fnocsv|awk -F '_' '{print $2}'`
             date_str=$(date -d @$time_long +'%Y-%m-%d')
             if [ `echo $datelist|grep $date_str|wc -l` -eq 1 ];then
                echo "[$process_file][$thread] match ${bucket_prefix}$bucket/$folder/$orgid/${f} $date_str">>$log_file
                # Restore the file from both staging prefixes to the bucket root.
                aws s3 mv ${bucket_prefix}${bucket}/backup/$orgid/${f} ${bucket_prefix}${bucket}/$orgid/${f}
                aws s3 mv ${bucket_prefix}${bucket}/dupcheck/$orgid/${f} ${bucket_prefix}${bucket}/$orgid/${f}
             fi
          done
       done
    done
    View Code

    2.3.4 pm_missing_data_move.py

    import os,sys
    import logging.handlers
    import boto3
    import botocore.session
    from concurrent import futures
    import subprocess
    import datetime
    import logging
    import argparse
    import smtplib
    import threading
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from urllib import unquote
    # Thread pools: `pool` for per-org work, `device_pool` for per-device moves.
    pool = futures.ThreadPoolExecutor(max_workers=10)
    device_pool = futures.ThreadPoolExecutor(max_workers=10)
    # S3 bucket name prefix; the suffix is "radio" or "sta".
    bucket_pref="gcs.calix.com-wifi-pm-per-"
    logger = logging.getLogger()
    # orgid -> device -> list of detail dicts; read by generate_body() for the
    # summary email (NOTE(review): nothing visible here ever fills it -- confirm).
    static_map={}
    
    
    def move_file(file_name,orgid):
        """Move one PM file out of the dupcheck/backup staging folders back to
        the org's root prefix, in both the radio and sta buckets.

        NOTE(review): not called anywhere in this file -- presumably kept for
        manual/one-off use; confirm before removing.
        """
        for bucket_suffix in ('radio', 'sta'):
            bucket_name = "%s%s" % (bucket_pref, bucket_suffix)
            dfile = "s3://%s/%s/%s" % (bucket_name, orgid, file_name)
            for staging_folder in ('dupcheck', 'backup'):
                sfile = "s3://%s/%s/%s/%s" % (bucket_name, staging_folder, orgid, file_name)
                logger.error("aws s3 mv %s %s", sfile, dfile)
                subprocess.call(["aws", "s3", "mv", sfile, dfile])
    
    
    def process_device(orgid,device,time_list):
        """Shell out to fulfill_data.sh for one device, passing its missing
        dates as a comma-separated list plus the source file and worker name."""
        joined_dates = ','.join(time_list)
        worker_name = threading.currentThread().getName()
        logger.error("%s Begin process org:%s ,device:%s missing date:%s" % (filename, orgid, device, joined_dates))
        subprocess.call(['./fulfill_data.sh', orgid, device, joined_dates, filename, worker_name])
        return
    
    def process_org_data(orgid,device_map):
        """Fan out one process_device task per device of this org onto device_pool."""
        logger.error("%s--Begin process org:%s ,with devices:%s --" % (filename, orgid, len(device_map)))
        for device, missing_dates in device_map.items():
            device_pool.submit(process_device, orgid, device, missing_dates)
    
    
    def init_logger():
        """Configure the module logger: ERROR level, stderr stream handler with
        a timestamp / file:line / thread-name prefix on every record.
        :return: None
        """
        log_format = '%(asctime)s-[%(filename)s:%(lineno)s]-[%(threadName)s]- %(message)s'
        stream_handler = logging.StreamHandler(sys.stderr)
        stream_handler.setFormatter(logging.Formatter(log_format))
        logger.setLevel(logging.ERROR)
        logger.addHandler(stream_handler)
    
    
    def generate_body():
        """Render the module-global static_map as the HTML table used in the
        daily summary email: one row per re-synced file plus a per-org total.

        Returns the HTML fragment as a string.
        """
        body = """<h4>Daily PM Files RE-SYNC STATICS</h4>"""
        body+="<br><br><hr><div>"
        body+="<table>"
        body+="<tr><th>Org</th><th>Device</th><th>Date Time</th><th>File Name</th></tr>"
        for orgid in static_map.keys():
            device_map=static_map[orgid]
            cnt=0
            for device in device_map.keys():
                # renamed from `list`, which shadowed the builtin
                details=device_map[device]
                for detail in details:
                    cnt+=1
                    # FIX: this statement was split over two lines without a
                    # continuation, which is a SyntaxError; wrap it in parens.
                    body+=("<tr><td>  %s  </td><td>  %s  </td><td>  %s  </td><td>  %s  </td></tr>"
                           %(orgid,device,detail['date_time'],detail['file_name']))
            body+="<tr><td>Total</td><td colspan=3>devices: %s total number:%s</td></tr>"%(len(device_map),cnt)
        body+="</table>"
        # NOTE(review): the opening <div> is never closed -- confirm intended.
        return body
    
    
    def generate_static(args):
        """Build and email the daily RE-SYNC summary to every -e recipient.

        Reads the module-global static_map via generate_body().  Returns True
        when the messages were handed to the SMTP server, False on failure or
        when no recipients were given.
        """
        logging.error(static_map)
        #if len(static_map)==0:
        #    logging.error("No need to send email")
        #    return
        subject = "[%s] Daily PM Files RE-SYNC STATICS[%s]"%(datetime.datetime.now().strftime('%y-%m-%d'),
                                                             "FullSync" if args.fullsync else "Delta")
        body = generate_body()
        emails = args.email
        smtp_host = args.smtpHost
        smtp_port = args.smtpPort
        username = args.username
        password = args.password
        sent_from = args.sentFrom
        if emails:
            mail_server = smtplib.SMTP()
            try:
                msg = MIMEMultipart()
                msg['From'] = sent_from
                msg['Subject'] = subject
                msg.attach(MIMEText(body, 'html'))
                # STARTTLS handshake: EHLO, upgrade to TLS, EHLO again.
                mail_server.connect(smtp_host, smtp_port)
                mail_server.ehlo()
                mail_server.starttls()
                mail_server.ehlo()
                # The internal relay needs no auth; any other host does.
                # NOTE(review): the password is URL-decoded here -- presumably
                # it is passed percent-encoded on the command line; confirm.
                if password and smtp_host != 'eng-smtp.calix.local':
                    mail_server.login(username, unquote(password))
                for recipient in emails:
                    logger.error("send email to %s", recipient)
                    msg['To'] = recipient
                    mail_server.sendmail(sent_from, recipient, msg.as_string())
            except Exception as err:
                logger.error("send email failed:%s", err)
                return False
            finally:
                if mail_server:
                    mail_server.close()
            return True
        return False
    
    
    if __name__ == '__main__':
        init_logger()
        parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
        parser.add_argument('-e', '--email', action='append',
                            help="To send multiple emails --email <email-address-1> --email <email-address-2> ...")
        parser.add_argument('--smtpHost', type=str, help="Host of SMTP server", required=False,
                            default="outlook.office365.com")
        parser.add_argument('--smtpPort', type=int, help="Port of SMTP server", required=False, default=587)
        parser.add_argument('--sentFrom', type=str, help="outlook email", required=False,
                            default="noreply-compass-fa@calix.com")
        parser.add_argument('--username', type=str, help="outlook username", required=False,
                            default="noreply-compass-fa@calix.com")
        parser.add_argument('--password', type=str, default='xx', help="outlook password", required=False)
        parser.add_argument('--fullsync',action='store_true',help="full sync all results",required=False)
        parser.add_argument('--filename',type=str,default="result.txt",help="full sync all results",required=False)
        args = parser.parse_args()
        # NOTE(review): `global` at module level is a no-op; `filename` is read
        # as a module global by process_device/process_org_data.
        global filename
        filename=args.filename
        logger.error(args)
        # org_map: orgid -> device -> [date strings]; history_map records
        # org-device-date triples already handled by the previous run.
        org_map={}
        history_map={}
        if not args.fullsync:
            # Delta mode: load the previous run's results so already-processed
            # triples can be skipped below.
            if os.path.isfile('result.txt.bak'):
                with open('result.txt.bak') as fp:
                    line = fp.readline()
                    cnt = 1
                    # NOTE(review): the readline at the loop top means the first
                    # line is never parsed -- presumably the psql header row.
                    while line:
                        line = fp.readline()
                        cnt += 1
                        if cnt>2 and line.lstrip().find('|')>0:
                            # psql output: orgid | device | date_time ...
                            tmp=line.split('|')
                            orgid=tmp[0].strip()
                            device=tmp[1].strip()
                            date_time=tmp[2].split()[0].strip()
                            _tmp_str='%s-%s-%s'%(orgid,device,date_time)
                            history_map[_tmp_str]=_tmp_str
        # Parse the current result file into org_map (same pipe-separated
        # layout; the 'orgid' guard skips the header row).
        with open(filename) as fp:
            line = fp.readline()
            #cnt = 1
            while line:
                line = fp.readline()
             #   cnt += 1
    #            if cnt>2 and line.lstrip().find('|')>0:
                if line.lstrip().find('|')>0 and line.split('|')[0].strip()!='orgid':
                    tmp=line.split('|')
                    orgid=tmp[0].strip()
                    device=tmp[1].strip()
                    date_time=tmp[2].split()[0].strip()
                    _tmp_str='%s-%s-%s'%(orgid,device,date_time)
                    # Python 2 only: dict.has_key was removed in Python 3.
                    if history_map.has_key(_tmp_str):
                        logger.info("orgid:%s device:%s date_time:%s had executed on last job,ignore",orgid,device,date_time)
                        continue
                    device_map=org_map[orgid] if org_map.has_key(orgid) else {}
                    time_list=device_map[device] if device_map.has_key(device) else []
                    time_list.append(date_time)
                    device_map[device]=time_list
                    org_map[orgid]=device_map
        # Org 50 is excluded from recovery (mirrors rm _result_50 in the shell job).
        if org_map.has_key('50'):
            org_map.pop('50')
        for org in org_map.keys():
            process_org_data(org,org_map[org])
        # Wait for every submitted task before exiting.
        pool.shutdown(wait=True)
        device_pool.shutdown(wait=True)
        #generate_static(args)
    View Code

    2.3.5 sendEmail.py

    #!/usr/bin/python
    # -*- coding: UTF-8 -*-
    import os,sys
    import json
    import urllib2
    import argparse
    import logging
    import logging.handlers
    
    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.mime.multipart import MIMEBase
    
    from email import Encoders
    from urllib import unquote
    
    
    logger = logging.getLogger()
    
    
    def get_response(url):
        """Fetch *url* and decode the response body as JSON.

        Returns the decoded object, or None when the request or the JSON
        decoding fails (the error is logged and deliberately swallowed).
        """
        jsonObj = None
        try:
            req = urllib2.Request(url)
            res_data = urllib2.urlopen(req)
            jsonObj = json.loads(res_data.read())
        except Exception as e:
            # BUG FIX: the message had no %s placeholder, so passing the
            # exception as a logging argument raised a formatting error
            # inside the logging module instead of logging it.
            logger.error("get response error:%s", e)
        finally:
            # 'return' in finally intentionally suppresses any re-raise.
            return jsonObj
    
    
    def generate_body():
        """Build the HTML mail body from the contents of log.log.

        Every line of the log file is appended after the heading, each
        followed by a <br> tag.
        """
        heading = """<h4>Simulator Damo Org Data</h4>"""
        parts = [heading]
        with open('log.log') as fp:
            for log_line in fp:
                parts.append(log_line + "<br>")
        return ''.join(parts)
    
    
    def send_email(args):
        """Send the composed email (with optional attachments) to each recipient.

        Connects to the configured SMTP host with STARTTLS, logging in unless
        the host is the internal relay 'eng-smtp.calix.local'.

        Returns True when all recipients were processed, False when there are
        no recipients or the send failed.
        """
        subject = args.subject
        body = args.body
        emails = args.email
        smtp_host = args.smtpHost
        smtp_port = args.smtpPort
        username = args.username
        password = args.password
        sent_from = args.sentFrom
        if not emails:
            return False
        mail_server = smtplib.SMTP()
        try:
            msg = MIMEMultipart()
            msg['From'] = sent_from
            msg['Subject'] = subject
            msg.attach(MIMEText(body, 'html'))
            if args.attachment:
                for name in args.attachment:
                    part = MIMEBase('application', "octet-stream")
                    # NOTE(review): file handle not closed explicitly;
                    # acceptable for a short-lived CLI tool.
                    part.set_payload(open(name, "rb").read())
                    Encoders.encode_base64(part)
                    part.add_header('Content-Disposition', 'attachment; filename="%s"'%(name))
                    msg.attach(part)
            mail_server.connect(smtp_host, smtp_port)
            mail_server.ehlo()
            mail_server.starttls()
            mail_server.ehlo()
            # Password arrives URL-encoded on the command line.
            if password and smtp_host != 'eng-smtp.calix.local':
                mail_server.login(username, unquote(password))
            for recipient in emails:
                logger.info("send email to %s", recipient)
                # BUG FIX: Message.__setitem__ APPENDS headers, so assigning
                # msg['To'] in a loop accumulated duplicate To headers across
                # recipients; delete the previous one before each send.
                del msg['To']
                msg['To'] = recipient
                mail_server.sendmail(sent_from, recipient, msg.as_string())
        except Exception as err:
            logger.error("send email failed:%s", err)
            return False
        finally:
            if mail_server:
                mail_server.close()
        return True
    
    
    def init_log():
        """Configure the module logger: DEBUG level, stderr stream handler
        with a timestamp/file:line/thread prefix."""
        log_format = '%(asctime)s-[%(filename)s:%(lineno)s]-[%(threadName)s]- %(message)s'
        stream_handler = logging.StreamHandler(sys.stderr)
        stream_handler.setFormatter(logging.Formatter(log_format))
        logger.setLevel(logging.DEBUG)
        logger.addHandler(stream_handler)
    
    
    
    def main():
        """Parse CLI arguments, set up logging and send the email."""
        parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
        parser.add_argument('-e', '--email', action='append',
                            help="To send multiple emails --email <email-address-1> --email <email-address-2> ...")
        parser.add_argument('--smtpHost', type=str, help="Host of SMTP server", required=False,
                            default="outlook.office365.com")
        parser.add_argument('--smtpPort', type=int, help="Port of SMTP server", required=False, default=587)
        # BUG FIX: the two defaults below were unterminated string literals
        # ('default="xx)'), a SyntaxError; the scrubbed placeholder values
        # are now properly closed.
        parser.add_argument('--sentFrom', type=str, help="outlook email", required=False,
                            default="xx")
        parser.add_argument('--username', type=str, help="outlook username", required=False,
                            default="xx")
        parser.add_argument('--password', type=str, default='xx', help="outlook password", required=False)
        parser.add_argument('-s', '--subject', default='test subject')
        parser.add_argument('-b', '--body', default='test body')
        parser.add_argument('-a', '--attachment', action='append')
        args = parser.parse_args()
        init_log()
        logger.debug("*" * 20)
        logger.debug(args)
        logger.debug("*" * 20)
        send_email(args)
    
    
    if __name__ == '__main__':
        main()
    View Code

    2.3.6 netstat_check.sh

    #!/bin/bash
    # Quick TCP/socket health report:
    #   1) overall socket summary,
    #   2) TCP connection counts grouped by state,
    #   3) open-file counts per command for the s3 PM check job,
    #   4) the raw lsof lines for that job.
    ss -s
    echo "-----------------------------------------"
    # $NF is the TCP state column; tally connections per state.
    netstat -n | awk '/^tcp/ {++state[$NF]} END {for(key in state) print key,"	",state[key]}'
    echo "-----------------------------------------"
    lsof |grep s3_daily_pm_check_missing_dat|awk '{print $1}'|sort|uniq -c|sort -n
    echo "-----------------------------------------"
    lsof |grep s3_daily_pm_check_missing_dat
    View Code

     2.3.7 org_fulfill.sh

    #!/bin/bash
    # Re-match PM backup/dupcheck files in S3 against the dates recorded in
    # result.txt for one org, and print (dry-run) the `aws s3 mv` commands
    # that would move matching files back into the org folder.
    # Usage: org_fulfill.sh <orgId>
    orgId=$1
    bucket_prefix="s3://wifi-pm-per-"
    if [ -z "$orgId" ];then
       echo "Missing OrgId"
       exit
    fi
    # Rows of result.txt belonging to this org.
    grep "$orgId" result.txt >"org_$orgId"
    for bucket in sta radio
    do
       for folder in backup dupcheck
       do
          rm -rf "${bucket}_${folder}_${orgId}"
          # Record "<file> <date>" pairs for every file of the org in this
          # bucket/folder; the date is decoded from the epoch timestamp in
          # the file name (<device>_<epoch>.csv).
          for f in $(aws s3 ls "${bucket_prefix}$bucket/$folder/$orgId/"|awk '{print $4}')
          do
             fnocsv=${f%%.csv}
             cdevice=$(echo "$fnocsv"|awk -F '_' '{print $1}')
             time_long=$(echo "$fnocsv"|awk -F '_' '{print $2}')
             date_str=$(date -d @"$time_long" +'%Y-%m-%d')
             echo "$f  $date_str">>"${bucket}_${folder}_${orgId}"
          done
        done
    done
    for device in $(awk -F '|' '{print $2}' "org_$orgId"|sort|uniq)
    do
       echo "Begin process device $device"
       # Comma-separated list of dates on which this device appears in result.txt.
       date_list=$(grep "$device" "org_$orgId"|awk -F '|' '{print $3}'|awk '{print $1}'|awk '{printf ("%s,",$0)}')
       for bucket in sta radio
       do
          for folder in backup dupcheck
          do
             grep "$device" "${bucket}_${folder}_${orgId}">"${bucket}_${folder}_${orgId}_$device"
             while IFS= read -r line
             do
                date_str=$(echo "$line"|awk '{print $2}')
                filename=$(echo "$line"|awk '{print $1}')
                if [ "$(echo "$date_list"|grep "$date_str" |wc -l)" -eq 1 ];then
                   echo "match $bucket/$orgId/${folder}/$filename with date $date_str"
                   # Dry run: print the move command; uncomment below to execute.
                   echo "aws s3 mv ${bucket_prefix}$bucket/$orgId/${folder}/$filename ${bucket_prefix}$bucket/$orgId/$filename"
                   #aws s3 mv ${bucket_prefix}$bucket/$orgId/${folder}/$filename ${bucket_prefix}$bucket/$orgId/$filename
                fi
             done < "${bucket}_${folder}_${orgId}_$device"
             rm "${bucket}_${folder}_${orgId}_$device"
          done
       done
    done
    View Code

    2.4 collect postgres status

    2.4.1 env.sh

    #!/bin/bash
    # Connection strings and passwords for the environments polled by
    # postgres_cron.sh.  "xx" values are placeholders scrubbed before
    # publishing; fill in real hosts/credentials before use.
    export psql_us_prod="psql -h xx -d cloud -U calixcloud"
    export psql_us_prod_pass="xx"
    
    export psql_ca_prod="psql -h xx -d cloud -U calixcloud"
    export psql_ca_prod_pass="xx"
    
    # Local devops database that aggregates the collected stats.
    export psql_dev="psql -h xx -d devops -U postgres"
    export psql_dev_pass="postgres"
    View Code

    2.4.2 postgres_cron.sh

    #!/bin/bash
    # Collect Postgres statistics from each prod environment
    # (cron_${cron}/from*.sql) and load the results into the local devops DB
    # (cron_${cron}/to_*.tmp templates, with ENV substituted per environment).
    # Usage (from cron): postgres_cron.sh [1min|1hour]   (default: 1hour)
    BASEDIR=$(cd "$(dirname "$0")" && pwd)
    cd "$BASEDIR">/dev/null
    source env.sh
    cron=$1
    if [ -z "$cron" ];then
       cron="1hour"
    fi
    logs=$BASEDIR/logs/psql_cronjob_${cron}_$(TZ=:Asia/Hong_Kong date +'%Y-%m-%d').log
    process()
    {
       env=$1
       echo "-----------------------------------------------"|tee -a "$logs"
       echo "Begin process $env"                             |tee -a "$logs"
       echo "-----------------------------------------------"|tee -a "$logs"
       # Indirect lookup: psql_<env> / psql_<env>_pass are defined in env.sh.
       pass=$(eval echo '$'psql_"${env}_pass")
       cmd=$(eval echo '$'psql_"$env")
       export PGPASSWORD=$pass
       # $cmd intentionally unquoted: it holds the whole psql command line.
       for f in $(ls cron_${cron}/from*)
       do
          echo "$cmd -f $f"|tee -a "$logs"
          # BUG FIX: '2>>$logs' used to be attached to tee, so psql's stderr
          # went to the terminal instead of the log; redirect psql directly.
          $cmd -f "$f" 2>>"$logs" |tee -a "$logs"
       done
    
       export PGPASSWORD=$psql_dev_pass
       for f in $(ls cron_${cron}/to_*.tmp)
       do
          # Materialize the template with the current environment name.
          sql=${f%%.tmp}
          cp "$f" "$sql"
          sed -i "s/ENV/$env/g" "$sql"
          echo "$psql_dev -f $sql"|tee -a "$logs"
          $psql_dev -f "$sql" 2>>"$logs" |tee -a "$logs"
       done
    }
    
    main()
    {
       cd "$BASEDIR">/dev/null
       echo ""|tee -a "$logs"
       echo ""|tee -a "$logs"
       echo "+++++++++++++++++++++++++++++++++++++++++++++++"|tee -a "$logs"
       echo "[$(TZ=:Asia/Hong_Kong date +'%Y-%m-%d %H:%M:%S')] BEGIN postgres $cron DATA"|tee -a "$logs"
       echo "+++++++++++++++++++++++++++++++++++++++++++++++"|tee -a "$logs"
       process "us_prod"
       process "ca_prod"
       echo "[$(TZ=:Asia/Hong_Kong date +'%Y-%m-%d %H:%M:%S')] FINISH process $cron DATA"|tee -a "$logs"
       echo |tee -a "$logs"
    }
    main
    View Code

    2.4.3 cron_1min/to_postgres_active_sql.sql.tmp

    -- Load the pg_stat_activity snapshots exported by
    -- from_postgres_active_sql.sql into the devops DB.  'ENV' is replaced
    -- with the environment name by postgres_cron.sh before execution.
    truncate table pg_stat_activity_tmp;
    COPY pg_stat_activity_tmp from '/tmp/psql_pg_stat_activity.csv' csv header;
    -- Drop rows without a query_start and rows already recorded for this env.
    delete from pg_stat_activity_tmp where query_start is null;
    delete from pg_stat_activity_tmp a using env_pg_stat_activity b where b.env_name='ENV' and a.pid=b.pid and a.usesysid=b.usesysid and a.query_start=b.query_start;
    -- Replace this env's "last" snapshot, stamp it and append it to history.
    delete from pg_stat_activity_tmp_last where env_name='ENV';
    insert into pg_stat_activity_tmp_last select * from pg_stat_activity_tmp;
    update pg_stat_activity_tmp_last set env_name='ENV',dur_time=age(clock_timestamp(), query_start) where env_name is null;
    insert into env_pg_stat_activity select * from pg_stat_activity_tmp_last where env_name='ENV';
    
    -- Same flow for currently-active queries (excluding the replication user).
    truncate table pg_stat_activity_run_tmp;
    COPY pg_stat_activity_run_tmp from '/tmp/psql_pg_stat_activity_active.csv' csv header;
    delete from env_pg_stat_activity_run a using pg_stat_activity_run_tmp b where a.env_name='ENV' and a.pid=b.pid and a.usesysid=b.usesysid and  a.query_start=b.query_start ;
    insert into env_pg_stat_activity_run select * from pg_stat_activity_run_tmp where usename!='replication';
    update env_pg_stat_activity_run set env_name='ENV',dur_time=age(clock_timestamp(), query_start) where env_name is null;
    
    -- Queries that disappeared from the latest active snapshot are marked idle...
    with t as (select a.pid,a.usesysid,a.query_start,a.env_name from env_pg_stat_activity_run a left join pg_stat_activity_run_tmp b on a.pid=b.pid and a.usesysid=b.usesysid and a.query_start=b.query_start where a.env_name='ENV' and b.pid is null) update env_pg_stat_activity_run x set state='idle' from t where x.pid=t.pid and x.usesysid=t.usesysid and x.query_start=t.query_start and x.env_name=t.env_name;
    
    -- ...then idle entries move from the "run" table into the history table.
    delete from env_pg_stat_activity_run a using env_pg_stat_activity b where a.env_name=b.env_name and a.env_name='ENV' and a.pid=b.pid and a.usesysid=b.usesysid and a.query_start=b.query_start and a.state='idle';
    insert into env_pg_stat_activity select * from  env_pg_stat_activity_run where state='idle' and env_name='ENV';
    delete from env_pg_stat_activity_run where state='idle' and env_name='ENV';
    View Code

    2.4.4 cron_1min/from_postgres_active_sql.sql

    -- Export idle sessions (excluding this backend and housekeeping statements
    -- like SET/SHOW/COMMIT) for pickup by to_postgres_active_sql.sql.
    COPY (select * from pg_stat_activity where pid<>pg_backend_pid() and state='idle' and upper(query) not like 'SET%' and upper(query) not like 'SHOW%' and query != 'COMMIT') to '/tmp/psql_pg_stat_activity.csv' csv header;
    
    -- Export currently running queries with the same filters.
    COPY (select * from pg_stat_activity where pid<>pg_backend_pid() and state='active' and upper(query) not like 'SET%' and upper(query) not like 'SHOW%' and query != 'COMMIT') to '/tmp/psql_pg_stat_activity_active.csv' csv header;
    View Code

    2.4.5 cron_1hour/from_postgres_table_summarize.sql

    -- Per-table size breakdown (total/index/toast/relation bytes) for all
    -- public tables, largest first.
    COPY (select table_name,pg_size_pretty(total_bytes) AS total, pg_size_pretty(index_bytes) AS idx , pg_size_pretty(toast_bytes) AS toast , pg_size_pretty(table_bytes) AS relsize,total_bytes,index_bytes,toast_bytes,table_bytes from (select *, total_bytes-index_bytes-COALESCE(toast_bytes,0) AS table_bytes FROM ( SELECT c.oid,nspname AS table_schema, relname AS table_name , c.reltuples AS row_estimate , pg_total_relation_size(c.oid) AS total_bytes , pg_indexes_size(c.oid) AS index_bytes , pg_total_relation_size(reltoastrelid) AS toast_bytes FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE relkind = 'r' and relname in (select tablename from pg_tables where schemaname='public') ) a) a order by total_bytes desc) TO '/tmp/table_size.csv' csv header;
    -- Per-table activity counters (scans, tuple churn, vacuum/analyze history).
    COPY (select relname,seq_scan,seq_tup_read,idx_scan,idx_tup_fetch,n_tup_ins,n_tup_upd,n_tup_del,n_tup_hot_upd,n_live_tup,n_dead_tup,n_mod_since_analyze,last_vacuum,last_autovacuum,last_analyze,last_autoanalyze,vacuum_count,autovacuum_count,analyze_count,autoanalyze_count from pg_stat_user_tables where schemaname='public') to '/tmp/table_opt.csv' csv header;
    View Code

    2.4.6 cron_1hour/to_postgres_process.sql.tmp

    -- Hourly job: load /tmp/table_size.csv and /tmp/table_opt.csv (exported by
    -- from_postgres_table_summarize.sql) and record per-hour deltas versus the
    -- previous snapshot.  'ENV' is substituted by postgres_cron.sh; re-running
    -- within the same hour replaces that hour's rows.
    delete from env_table_size_inc where env_name='ENV' and date_time in (select to_timestamp(to_char(now(),'YYYY-mm-dd HH24:00:00'),'YYYY-mm-dd HH24:MI:SS'));
    
    --truncate table table_size_tmp
    truncate table table_size_tmp;
    --copy data
    COPY table_size_tmp from '/tmp/table_size.csv' csv header;
    --summarize delta
    insert into env_table_size_inc select to_timestamp(to_char(now(),'YYYY-mm-dd HH24:00:00'),'YYYY-mm-dd HH24:MI:SS'),'ENV',a.table_name,a.total,a.idx,a.toast,a.relsize,(a.total_bytes - b.total_bytes),(a.index_bytes-b.index_bytes),(a.toast_bytes - b.toast_bytes),(a.table_bytes - b.table_bytes) from table_size_tmp a left join table_size_tmp_last b on a.table_name=b.table_name and b.env_name='ENV';
    --copy new data
    delete from table_size_tmp_last where env_name='ENV';
    insert into table_size_tmp_last select * from table_size_tmp;
    update table_size_tmp_last set env_name='ENV' where env_name is null;
    
    
    -- Same pattern for the table activity counters.
    delete from env_table_opt_inc where env_name='ENV' and date_time in (select to_timestamp(to_char(now(),'YYYY-mm-dd HH24:00:00'),'YYYY-mm-dd HH24:MI:SS'));
    
    truncate table table_opt_tmp;
    COPY table_opt_tmp from '/tmp/table_opt.csv' csv header;
    insert into env_table_opt_inc select to_timestamp(to_char(now(),'YYYY-mm-dd HH24:00:00'),'YYYY-mm-dd HH24:MI:SS'),'ENV',a.table_name,a.seq_scan,a.seq_tup_read,a.idx_scan,a.idx_tup_fetch,a.n_tup_ins,a.n_tup_upd,a.n_tup_del,a.n_tup_hot_upd,a.n_live_tup,a.n_dead_tup,a.n_mod_since_analyze,a.last_vacuum,a.last_autovacuum,a.last_analyze,a.last_autoanalyze,a.vacuum_count,a.autovacuum_count,a.analyze_count,a.autoanalyze_count,(a.seq_scan - b.seq_scan),(a.seq_tup_read - b.seq_tup_read  ),(a.idx_scan - b.idx_scan),(a.idx_tup_fetch - b.idx_tup_fetch ),(a.n_tup_ins - b.n_tup_ins),(a.n_tup_upd - b.n_tup_upd),(a.n_tup_del - b.n_tup_del),(a.n_tup_hot_upd - b.n_tup_hot_upd),(a.n_live_tup - b.n_live_tup),(a.n_dead_tup - b.n_dead_tup) from table_opt_tmp a left join table_opt_tmp_last b on a.table_name=b.table_name and b.env_name='ENV';
    delete from table_opt_tmp_last where env_name='ENV';
    insert into table_opt_tmp_last select * from table_opt_tmp;
    update table_opt_tmp_last set env_name='ENV' where env_name is null;
    View Code

    2.5 setup_csc

     2.5.1 envsubst.sh

    #!/bin/bash
    # Substitute environment variables in a template file:
    #   envsubst.sh -e <env-file> <subst-file>
    # Sources <env-file>, then pipes <subst-file> through envsubst to stdout.
    
    function usage() {
      echo "Usage: $0 -h -e <env-file> [subst-file]"
      echo "Options:"
      echo " -h|--help                     Print help instructions"
      echo " -e|--env                      Use the environment file"
      echo " subst-file                    The file to substitute env variales"
      exit 1
    }
    
    
    # BUG FIX: '[[ $# > 0 ]]' compares lexicographically (e.g. "10" sorts
    # before "9"); use the numeric -gt operator instead.
    while [[ $# -gt 0 ]]; do
      key="$1"
      case $key in
        -h|--help)
        usage
        ;;
    
        -e|--env)
        ENV_FILE="$2"
        shift # past argument
        ;;
    
        *)
        SUBST_FILE="$1"
        ;;
      esac
      shift # past argument or value
    done
    
    # NOTE: usage() exits 1, so the echo/exit 2 lines below are unreachable;
    # kept for fidelity with the original control flow.
    if [ ! -f "$ENV_FILE" ]; then
      echo "Env file $ENV_FILE does not exist"
      usage
      echo
      exit 2
    fi
    
    if [ ! -f "$SUBST_FILE" ]; then
      echo "Substitution file $SUBST_FILE does not exist"
      usage
      echo
      exit 2
    fi
    
    SUBST_CMD=$(type -p envsubst)
    if [ "X$SUBST_CMD" = "X" ]; then
      echo "Cannot find 'envsubst' command"
      usage
      echo
      exit 1
    fi
    
    . "$ENV_FILE"
    
    "$SUBST_CMD" <"$SUBST_FILE"
    View Code

    2.5.2 env.sh

    #!/bin/bash
    # Environment definitions for the "workflow" on-prem CSC deployment.
    # Sourced by the setup scripts and consumed by envsubst.sh templates.
    export env_name="workflow"
    # Service hosts.
    export acs_host="10.245.247.163"
    export cloud_api_host="10.245.247.164"
    export kafka_bootstrap_server="10.245.247.163"
    export greenplum_host="10.245.247.173"
    export es_host="nancloud-onprem-05"
    export postgres_ip="nancloud-onprem-06.calix.local"
    export redis_url="10.245.248.141:26379,10.245.248.142:26379,10.245.248.143:26379"
    
    # Redis sentinel / Greenplum / Mongo / Postgres credentials.
    export redis_master_name="mymaster"
    export redis_pass="calix-redis-pass"
    export redis_database_index=4
    export greenplum_port="15432"
    export greenplum_db="onecloud"
    export greenplum_username="calixcloud"
    export greenplum_password="CalixCloud"
    export cloud_mongo_url="mongodb://cdc-kylin:27017/cloud_${env_name}?replicaSet=cmdctl"
    export postgres_db="${env_name}_cloud"
    export postgres_master_username="postgres"
    export postgres_master_password="postgres"
    export cloud_postgres_username="$env_name"
    export cloud_postgres_password="${env_name}_pass"
    # NOTE(review): "postgers" typo is preserved — templates reference this name.
    export cwmp_postgers_ip="$postgres_ip"
    export cwmp_postgres_db="$postgres_db"
    export cwmp_postgres_username=$cloud_postgres_username
    export cwmp_postgres_password=$cloud_postgres_password
    export cloud_api_url="http://${cloud_api_host}:80"
    export pmgcs_host="pmgcs-aqatest.calix.com"
    export wifi_redshift_host="redshift.amazonaws.com"
    export wifi_redshift_port=5439
    export wifi_redshift_db="aqa"
    export wifi_redshift_username="aqa"
    export wifi_redshift_password="0099"
    export influxdb="http://10.245.242.247:8086"
    export SXACC_HOME="/home/sxacc"
    export public_software_address="http://nancloud-onprem-06:8080"
    export cloud_repo_home="/home/meteor/cloud_repo"
    # Cluster endpoints.
    export zookeeper_cluster="10.245.248.134:2181,10.245.248.133:2181,10.245.248.135:2181"
    export fa_kafka_cluster="10.245.248.132:9092,10.245.248.136:9092,10.245.248.137:9092"
    View Code

    2.5.3 util.sh

    #!/bin/bash
    # Shared helpers for the setup scripts: colored logging, command/user
    # checks and psql wrappers.  Expects postgres_* variables from env.sh.
    step=0
    # BUG FIX: the ANSI escape prefix '\033' had been stripped (values were
    # literally "33[0;31m"), so every message printed garbage instead of color.
    # NOTE(review): the BLUE/GREEN codes look swapped (1;32 is green, 1;34 is
    # blue) but callers rely only on the names, so the values are kept as-is.
    RED='\033[0;31m'
    BLUE='\033[1;32m'
    GREEN='\033[1;34m'
    NC='\033[0m' # No Color
    
    # Returns 0 (!) when $1 is NOT installed, 1 when it is available.
    # This inverted convention is relied upon by install.sh / uninstall.sh.
    check_command()
    {
      cmd=$1
      if ! [ -x "$(command -v "${cmd}")" ]; then
         echo "Error: ${cmd} is not installed."
         return 0
      fi
      return 1
    }
    
    # Abort unless the current user's `id` output mentions $1.
    check_user()
    {
      if [ "$(id|grep "$1"|wc -l)" -eq 0 ];then
         echo -e "${RED}Must Use $1 User.$NC"
         exit 1
      fi
    }
    
    
    # Run $1 as a numbered step, printing $2 as the step description.
    execute_command()
    {
      step=$((step + 1))
      cmd=$1
      echo -e "$BLUE[$(date +'%Y-%m-%d %H:%M:%S')][Step $step] exec $2 $NC"
      $cmd
    }
    
    # Run one SQL statement against the cloud database.
    execute_sql()
    {
      sql=$1
      export PGPASSWORD=$cloud_postgres_password
      psql -h "$postgres_ip" -d "$postgres_db" -U "$cloud_postgres_username" -c "$sql"
    }
    
    # Run a SQL file against the cloud database.
    execute_sql_file()
    {
      file=$1
      export PGPASSWORD=$cloud_postgres_password
      psql -h "$postgres_ip" -d "$postgres_db" -U "$cloud_postgres_username" -f "$file"
    }
    
    # mkdir -p unless the directory already exists.
    create_folder()
    {
      if [ ! -d "$1" ];then
         mkdir -p "$1"
      fi
    }

    2.5.4 init_database/install.sh

    #!/bin/bash
    # Bootstrap the cloud Postgres database: create the role and database,
    # load the schema and seed CSV data, then refresh the device view.
    # Requires ../env.sh (connection settings) and ../util.sh (helpers).
    BASEDIR=$(cd $(dirname $0) && pwd)
    cd $BASEDIR>/dev/null
    source ../env.sh
    # Render grant.sql.tmp with env values substituted.
    ../envsubst.sh -e ../env.sh grant.sql.tmp>postgres.sql
    source ../util.sh
    export PGPASSWORD=$postgres_master_password
    
    # Create the application role if missing (idempotent); grants SUPERUSER.
    create_user()
    {
      if [ `psql -h $postgres_ip -d postgres -U $postgres_master_username -c "select usename from pg_user where usename='$cloud_postgres_username';"|grep $cloud_postgres_username|wc -l` -eq 1 ];then
        echo -e "${GREEN}User $cloud_postgres_username had created ,no need create,ignore$NC"
        return
      fi
      psql -h $postgres_ip -d postgres -U $postgres_master_username -c "create user $cloud_postgres_username with password '$cloud_postgres_password';"
      psql -h  $postgres_ip -d postgres -U $postgres_master_username -c "alter user $cloud_postgres_username with SUPERUSER;"
    }
    
    # Create the application database if missing (idempotent).
    create_database()
    {
       if [ `psql -h $postgres_ip -d postgres -U $postgres_master_username -c "select datname from pg_database where datname='$postgres_db';"|grep $postgres_db|wc -l` -eq 1 ];then
          echo -e "${GREEN}DataBase $postgres_db had created,ignore$NC"
          return
       fi
       psql -h $postgres_ip -d postgres -U $postgres_master_username -c "create database $postgres_db;"
    }
    
    # Smoke-test connectivity as the application user.
    test_connection()
    {
      execute_sql "select now();"
    }
    
    # Apply schema.sql plus the rendered grants (postgres.sql).
    initialize_db_schema()
    {
       execute_sql_file $BASEDIR/schema.sql
       execute_sql_file $BASEDIR/postgres.sql
    }
    
    # Truncate and reload the seed tables from the bundled CSV files.
    initialize_db_data()
    {
       cd $BASEDIR>/dev/null
       for f in cloud_app_scopes calixentitlement calixorganization calixspid cloud_rbac_orgconfig cloud_roles cloud_role_scopes cloud_user_roles calixuser
       do
          execute_sql "truncate table $f CASCADE;"
          execute_sql "\COPY $f FROM '${BASEDIR}/$f.csv' DELIMITER ',' CSV HEADER"
       done
    }
    # Rebuild the device materialized view after the seed reload.
    refresh_meterialized_view()
    {
    
      execute_sql "REFRESH MATERIALIZED VIEW device_view;"
    }
    main()
    {
       # check_command returns 0 when psql is MISSING (see util.sh).
       check_command "psql"
       retval=$?
       if [ $retval -eq 0 ];then
          exit 1
       fi
       execute_command "create_user" "Create User [$cloud_postgres_username]"
       execute_command "create_database" "Create DataBase [$postgres_db]"
       execute_command "test_connection" "Verify DB Connection"
       execute_command "initialize_db_schema" "Initialize Postgresql DB Schema"
       execute_command "initialize_db_data" "Initialize Postgresql DB Data"
       execute_command "refresh_meterialized_view" "Refresh View"
    }
    main
    View Code

    2.5.5 init_database/uninstall.sh

    #!/bin/bash
    # Tear down the cloud Postgres database: drop the database, then the role.
    # Requires ../env.sh (connection settings) and ../util.sh (helpers).
    BASEDIR=$(cd $(dirname $0) && pwd)
    cd $BASEDIR>/dev/null
    source ../env.sh
    # Render postgres.sql.tmp with env values substituted.
    ../envsubst.sh -e ../env.sh postgres.sql.tmp>postgres.sql
    source ../util.sh
    export PGPASSWORD=$postgres_master_password
    
    # Drop the application role if it exists (idempotent).
    drop_user()
    {
      if [ `psql -h $postgres_ip -d postgres -U $postgres_master_username -c "select usename from pg_user where usename='$cloud_postgres_username';"|grep $cloud_postgres_username|wc -l` -eq 0 ];then
        echo -e "${GREEN}User $cloud_postgres_username not exist ,no need drop,ignore$NC"
        return
      fi
      psql -h $postgres_ip -d postgres -U $postgres_master_username -c "drop user $cloud_postgres_username;"
    }
    
    # Drop the application database if it exists (idempotent).
    drop_database()
    {
       if [ `psql -h $postgres_ip -d postgres -U $postgres_master_username -c "select datname from pg_database where datname='$postgres_db';"|grep $postgres_db|wc -l` -eq 0 ];then
          echo -e "${GREEN}DataBase $postgres_db not exists,ignore$NC"
          return
       fi
       psql -h $postgres_ip -d postgres -U $postgres_master_username -c "drop database $postgres_db;"
    }
    
    # NOTE(review): test_connection/initialize_db_schema are defined but not
    # called from main() in this script.
    test_connection()
    {
      execute_sql "select now();"
    }
    
    initialize_db_schema()
    {
       execute_sql_file $BASEDIR/postgres.sql
    }
    
    
    main()
    {
       # check_command returns 0 when psql is MISSING (see util.sh).
       check_command "psql"
       retval=$?
       if [ $retval -eq 0 ];then
          exit 1
       fi
       execute_command "drop_database" "Drop DataBase [$postgres_db]"
       execute_command "drop_user" "Drop User [$cloud_postgres_username]"
    }
    main
    View Code

    2.5.6 docker/install_docker_17.10.sh

    #!/bin/sh
    # Legacy Docker 17.10 (edge channel) install script: detects the distro
    # and installs from the docker.com (or mirror) package repositories.
    set -e
    
    CHANNEL="edge"
    
    docker_version=17.10.0
    apt_url="https://apt.dockerproject.org"
    yum_url="https://yum.dockerproject.org"
    gpg_fingerprint="9DC858229FC7DD38854AE2D88D81803C0EBFCD88"
    
    # GPG key servers tried when importing the repository key.
    key_servers="
    ha.pool.sks-keyservers.net
    pgp.mit.edu
    keyserver.ubuntu.com
    "
    
    # RHEL repos that may provide required dependencies.
    rhel_repos="
    rhel-7-server-extras-rpms
    rhui-REGION-rhel-server-extras
    rhui-rhel-7-server-rhui-extras-rpms
    "
    
    # Parse --mirror <AzureChinaCloud|Aliyun>; other options are reported and ignored.
    mirror=''
    while [ $# -gt 0 ]; do
            case "$1" in
                    --mirror)
                            mirror="$2"
                            shift
                            ;;
                    *)
                            echo "Illegal option $1"
                            ;;
            esac
            shift $(( $# > 0 ? 1 : 0 ))
    done
    
    # Point apt/yum at the selected mirror.
    case "$mirror" in
            AzureChinaCloud)
                    apt_url="https://mirror.azure.cn/docker-engine/apt"
                    yum_url="https://mirror.azure.cn/docker-engine/yum"
                    ;;
            Aliyun)
                    apt_url="https://mirrors.aliyun.com/docker-engine/apt"
                    yum_url="https://mirrors.aliyun.com/docker-engine/yum"
                    ;;
    esac
    
    # Succeed (exit 0) when the named command resolves via `command -v`.
    command_exists() {
            if command -v "$@" > /dev/null 2>&1; then
                    return 0
            else
                    return 1
            fi
    }
    
    # Show 'docker version' output when the daemon socket exists, then print
    # guidance for running Docker as a non-root user.
    # NOTE(review): relies on $sh_c and $user being set by the caller (in
    # do_install, outside this chunk).  The <<-EOF form strips TAB indentation
    # only -- confirm the tabs survived copy/paste, otherwise the terminator
    # will not match.
    echo_docker_as_nonroot() {
            if command_exists docker && [ -e /var/run/docker.sock ]; then
                    (
                            set -x
                            $sh_c 'docker version'
                    ) || true
            fi
            your_user=your-user
            [ "$user" != 'root' ] && your_user="$user"
            # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
            cat <<-EOF
    
            If you would like to use Docker as a non-root user, you should now consider
            adding your user to the "docker" group with something like:
    
              sudo usermod -aG docker $your_user
    
            Remember that you will have to log out and back in for this to take effect!
    
            WARNING: Adding a user to the "docker" group will grant the ability to run
                     containers which can be used to obtain root privileges on the
                     docker host.
                     Refer to https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface
                     for more information.
    
            EOF
    }
    
    # Check if this is a forked Linux distro
    # Detect forked distros (Mint, Kali, ...) via `lsb_release -u` and map them
    # back to the upstream distro/version; falls back to /etc/debian_version.
    # Sets/overwrites the globals lsb_dist and dist_version.
    check_forked() {
    
            # Check for lsb_release command existence, it usually exists in forked distros
            if command_exists lsb_release; then
                    # Check if the `-u` option is supported
                    set +e
                    lsb_release -a -u > /dev/null 2>&1
                    lsb_release_exit_code=$?
                    set -e
    
                    # Check if the command has exited successfully, it means we're in a forked distro
                    if [ "$lsb_release_exit_code" = "0" ]; then
                            # Print info about current distro.
                            # BUG FIX: the original <<-EOF heredocs were indented with
                            # spaces (tabs lost in copy/paste), so the terminator never
                            # matched; use plain unindented heredocs instead.
                            cat <<EOF
You're using '$lsb_dist' version '$dist_version'.
EOF
    
                            # Get the upstream release info
                            lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]')
                            dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]')
    
                            # Print info about upstream distro
                            cat <<EOF
Upstream release is '$lsb_dist' version '$dist_version'.
EOF
                    else
                            if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
                                    # We're Debian and don't even know it!
                                    lsb_dist=debian
                                    # BUG FIX: the backslashes in the sed patterns had been
                                    # stripped ('s//.*//' is invalid); restore the escaping so
                                    # the version string is cut at the first '/' and '.'.
                                    dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
                                    case "$dist_version" in
                                            9)
                                                    dist_version="stretch"
                                            ;;
                                            8|'Kali Linux 2')
                                                    dist_version="jessie"
                                            ;;
                                            7)
                                                    dist_version="wheezy"
                                            ;;
                                    esac
                            fi
                    fi
            fi
    }
    
    # Split "$1" (e.g. "17.10.0-ce") into the globals major, minor and patch;
    # patch is truncated at the first '.' or '-' suffix.
    semverParse() {
            minor="${1#*.}"
            minor="${minor%%.*}"
            major="${1%%.*}"
            patch="${1#$major.$minor.}"
            patch="${patch%%[-.]*}"
    }
    
    # Warn that the $1 repository (at $url) is no longer updated and that the
    # legacy docker-engine package will be installed; sleeps 10s so the user
    # can Ctrl-C to abort.
    # NOTE(review): $url is expected to be set by the caller -- it is not
    # defined anywhere in this chunk; confirm before relying on the message.
    deprecation_notice() {
            echo
            echo
            echo "  WARNING: $1 is no longer updated @ $url"
            echo "           Installing the legacy docker-engine package..."
            echo
            echo
            sleep 10;
    }
    
    do_install() {
            # Main entry point: detect CPU architecture and Linux distribution,
            # then run the matching package-manager flow to install docker-ce
            # pinned to $docker_version, and exit.
            # Reads globals: url, repo, CHANNEL, docker_version, rhel_repos,
            # key_servers, gpg_fingerprint, apt_url (all defined elsewhere in
            # this script -- TODO confirm, they are outside this excerpt).
    
            architecture=$(uname -m)
            case $architecture in
                    # officially supported
                    amd64|x86_64)
                            ;;
                    # unofficially supported with available repositories
                    armv6l|armv7l)
                            ;;
                    # unofficially supported without available repositories
                    aarch64|arm64|ppc64le|s390x)
                            cat 1>&2 <<-EOF
                            Error: This install script does not support $architecture, because no
                            $architecture package exists in Docker's repositories.
    
                            Other install options include checking your distribution's package repository
                            for a version of Docker, or building Docker from source.
                            EOF
                            exit 1
                            ;;
                    # not supported
                    *)
                            cat >&2 <<-EOF
                            Error: $architecture is not a recognized platform.
                            EOF
                            exit 1
                            ;;
            esac
    
            # If docker already exists, warn -- and warn harder about the v1.10
            # image-store migration when the installed version is older than 1.10.
            if command_exists docker; then
                    version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)"
                    MAJOR_W=1
                    MINOR_W=10
    
                    semverParse $version
    
                    shouldWarn=0
                    if [ $major -lt $MAJOR_W ]; then
                            shouldWarn=1
                    fi
    
                    if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then
                            shouldWarn=1
                    fi
    
                    cat >&2 <<-'EOF'
                            Warning: the "docker" command appears to already exist on this system.
    
                            If you already have Docker installed, this script can cause trouble, which is
                            why we're displaying this warning and provide the opportunity to cancel the
                            installation.
    
                            If you installed the current Docker package using this script and are using it
                    EOF
    
                    if [ $shouldWarn -eq 1 ]; then
                            cat >&2 <<-'EOF'
                            again to update Docker, we urge you to migrate your image store before upgrading
                            to v1.10+.
    
                            You can find instructions for this here:
                            https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
                            EOF
                    else
                            cat >&2 <<-'EOF'
                            again to update Docker, you can safely ignore this message.
                            EOF
                    fi
    
                    cat >&2 <<-'EOF'
    
                            You may press Ctrl+C now to abort this script.
                    EOF
                    # 20-second grace period so the user can abort.
                    ( set -x; sleep 20 )
            fi
    
            user="$(id -un 2>/dev/null || true)"
    
            # Build the command prefix used for all privileged operations.
            sh_c='sh -c'
            if [ "$user" != 'root' ]; then
                    if command_exists sudo; then
                            sh_c='sudo -E sh -c'
                    elif command_exists su; then
                            sh_c='su -c'
                    else
                            cat >&2 <<-'EOF'
                            Error: this installer needs the ability to run commands as root.
                            We are unable to find either "sudo" or "su" available to make this happen.
                            EOF
                            exit 1
                    fi
            fi
    
            # Pick a downloader: curl, wget, or busybox wget, in that order.
            curl=''
            if command_exists curl; then
                    curl='curl -sSL'
            elif command_exists wget; then
                    curl='wget -qO-'
            elif command_exists busybox && busybox --list-modules | grep -q wget; then
                    curl='busybox wget -qO-'
            fi
    
            # check to see which repo they are trying to install from
            if [ -z "$repo" ]; then
                    repo='main'
                    if [ "https://test.docker.com/" = "$url" ]; then
                            repo='testing'
                    elif [ "https://experimental.docker.com/" = "$url" ]; then
                            repo='experimental'
                    fi
            fi
    
            # perform some very rudimentary platform detection
            lsb_dist=''
            dist_version=''
            if command_exists lsb_release; then
                    lsb_dist="$(lsb_release -si)"
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
                    lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
                    lsb_dist='debian'
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
                    lsb_dist='fedora'
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then
                    lsb_dist='oracleserver'
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then
                    lsb_dist='centos'
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then
                    lsb_dist='redhat'
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
                    lsb_dist="$(. /etc/os-release && echo "$ID")"
            fi
    
            lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
    
            # Special case redhatenterpriseserver
            if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then
                    # Set it to redhat, it will be changed to centos below anyways
                    lsb_dist='redhat'
            fi
    
            # Resolve dist_version (release codename or number) per distro family.
            case "$lsb_dist" in
    
                    ubuntu)
                            if command_exists lsb_release; then
                                    dist_version="$(lsb_release --codename | cut -f2)"
                            fi
                            if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
                                    dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
                            fi
                    ;;
    
                    debian|raspbian)
                            # NOTE(review): 's//.*//' is an empty regex and will fail at
                            # runtime; upstream used sed 's/\/.*//' and 's/\..*//' -- the
                            # backslashes appear lost in copy/paste. TODO confirm.
                            dist_version="$(cat /etc/debian_version | sed 's//.*//' | sed 's/..*//')"
                            case "$dist_version" in
                                    9)
                                            dist_version="stretch"
                                    ;;
                                    8)
                                            dist_version="jessie"
                                    ;;
                                    7)
                                            dist_version="wheezy"
                                    ;;
                            esac
                    ;;
    
                    oracleserver)
                            # need to switch lsb_dist to match yum repo URL
                            lsb_dist="oraclelinux"
                            dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}
    " | sed 's//.*//' | sed 's/..*//' | sed 's/Server*//')"
                    ;;
    
                    fedora|centos|redhat)
                            # NOTE(review): same garbled sed expressions as the
                            # debian branch above -- see note there.
                            dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}
    " | sed 's//.*//' | sed 's/..*//' | sed 's/Server*//' | sort | tail -1)"
                    ;;
    
                    *)
                            if command_exists lsb_release; then
                                    dist_version="$(lsb_release --codename | cut -f2)"
                            fi
                            if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
                                    dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
                            fi
                    ;;
    
    
            esac
    
            # Check if this is a forked Linux distro
            check_forked
    
            # Run setup for each distro accordingly
            case "$lsb_dist" in
                    ubuntu|debian)
                            pre_reqs="apt-transport-https ca-certificates curl"
                            if [ "$lsb_dist" = "debian" ] && [ "$dist_version" = "wheezy" ]; then
                                    pre_reqs="$pre_reqs python-software-properties"
                                    backports="deb http://ftp.debian.org/debian wheezy-backports main"
                                    if ! grep -Fxq "$backports" /etc/apt/sources.list; then
                                            # NOTE(review): the inner double quotes terminate the outer
                                            # pair, so $backports reaches sh -c unquoted; upstream
                                            # escaped them (\"$backports\"). TODO confirm.
                                            (set -x; $sh_c "echo "$backports" >> /etc/apt/sources.list")
                                    fi
                            else
                                    pre_reqs="$pre_reqs software-properties-common"
                            fi
                            if ! command -v gpg > /dev/null; then
                                    pre_reqs="$pre_reqs gnupg"
                            fi
                            apt_repo="deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/$lsb_dist $dist_version $CHANNEL"
                            (
                                    set -x
                                    $sh_c 'apt-get update'
                                    $sh_c "apt-get install -y -q $pre_reqs"
                                    # NOTE(review): '-fsSl' ends in a lowercase L (curl's
                                    # --list-only), not '-L' (follow redirects); presumably
                                    # a typo for '-fsSL'.
                                    curl -fsSl "https://download.docker.com/linux/$lsb_dist/gpg" | $sh_c 'apt-key add -'
                                    # NOTE(review): same unescaped-inner-quotes problem as the
                                    # $backports line above.
                                    $sh_c "add-apt-repository "$apt_repo""
                                    if [ "$lsb_dist" = "debian" ] && [ "$dist_version" = "wheezy" ]; then
                                            $sh_c 'sed -i "/deb-src.*download.docker/d" /etc/apt/sources.list'
                                    fi
                                    $sh_c 'apt-get update'
                                    $sh_c "apt-get install -y -q docker-ce=$(apt-cache madison docker-ce | grep ${docker_version} | head -n 1 | cut -d ' ' -f 4)"
                            )
                            echo_docker_as_nonroot
                            exit 0
                            ;;
                    centos|fedora|redhat|oraclelinux)
                            yum_repo="https://download.docker.com/linux/centos/docker-ce.repo"
                            if [ "$lsb_dist" = "fedora" ]; then
                                    if [ "$dist_version" -lt "24" ]; then
                                            echo "Error: Only Fedora >=24 are supported by $url"
                                            exit 1
                                    fi
                                    pkg_manager="dnf"
                                    config_manager="dnf config-manager"
                                    enable_channel_flag="--set-enabled"
                                    pre_reqs="dnf-plugins-core"
                            else
                                    pkg_manager="yum"
                                    config_manager="yum-config-manager"
                                    enable_channel_flag="--enable"
                                    pre_reqs="yum-utils"
                            fi
                            (
                                    set -x
                                    if [ "$lsb_dist" = "redhat" ]; then
                                            for rhel_repo in $rhel_repos ; do
                                                    $sh_c "$config_manager $enable_channel_flag $rhel_repo"
                                            done
                                    fi
                                    $sh_c "$pkg_manager install -y -q $pre_reqs"
                                    $sh_c "$config_manager --add-repo $yum_repo"
                                    if [ "$CHANNEL" != "stable" ]; then
                                            echo "Info: Enabling channel '$CHANNEL' for docker-ce repo"
                                            $sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL"
                                    fi
                                    $sh_c "$pkg_manager makecache fast"
                                    $sh_c "$pkg_manager install -y -q docker-ce-${docker_version}.ce"
                                    # NOTE(review): this condition looks inverted -- the presence
                                    # of /run/systemd/system means systemd IS the init system, so
                                    # that branch should call systemctl and the other 'service'.
                                    if [ -d '/run/systemd/system' ]; then
                                            $sh_c 'service docker start'
                                    else
                                            $sh_c 'systemctl start docker'
                                    fi
                            )
                            echo_docker_as_nonroot
                            exit 0
                            ;;
                    raspbian)
                            deprecation_notice "$lsb_dist"
                            export DEBIAN_FRONTEND=noninteractive
    
                            # Lazy one-shot apt-get update, shared by the steps below.
                            did_apt_get_update=
                            apt_get_update() {
                                    if [ -z "$did_apt_get_update" ]; then
                                            ( set -x; $sh_c 'sleep 3; apt-get update' )
                                            did_apt_get_update=1
                                    fi
                            }
    
                            # NOTE(review): dead code -- this whole arm only matches
                            # lsb_dist=raspbian, so the inner != raspbian guard never fires.
                            if [ "$lsb_dist" != "raspbian" ]; then
                                    # aufs is preferred over devicemapper; try to ensure the driver is available.
                                    if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
                                            if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then
                                                    kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual"
    
                                                    apt_get_update
                                                    ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true
    
                                                    if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
                                                            echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)'
                                                            echo >&2 ' but we still have no AUFS.  Docker may not work. Proceeding anyways!'
                                                            ( set -x; sleep 10 )
                                                    fi
                                            else
                                                    echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual'
                                                    echo >&2 ' package.  We have no AUFS support.  Consider installing the packages'
                                                    echo >&2 ' "linux-image-virtual" and "linux-image-extra-virtual" for AUFS support.'
                                                    ( set -x; sleep 10 )
                                            fi
                                    fi
                            fi
    
                            # install apparmor utils if they're missing and apparmor is enabled in the kernel
                            # otherwise Docker will fail to start
                            if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
                                    if command -v apparmor_parser >/dev/null 2>&1; then
                                            echo 'apparmor is enabled in the kernel and apparmor utils were already installed'
                                    else
                                            echo 'apparmor is enabled in the kernel, but apparmor_parser is missing. Trying to install it..'
                                            apt_get_update
                                            ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' )
                                    fi
                            fi
    
                            # Make sure apt can talk https and that gpg/dirmngr exist
                            # before importing the repo key below.
                            if [ ! -e /usr/lib/apt/methods/https ]; then
                                    apt_get_update
                                    ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' )
                            fi
                            if [ -z "$curl" ]; then
                                    apt_get_update
                                    ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' )
                                    curl='curl -sSL'
                            fi
                            if ! command -v gpg > /dev/null; then
                                    apt_get_update
                                    ( set -x; $sh_c 'sleep 3; apt-get install -y -q gnupg2 || apt-get install -y -q gnupg' )
                            fi
    
                            # dirmngr is a separate package in ubuntu yakkety; see https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1634464
                            if ! command -v dirmngr > /dev/null; then
                                    apt_get_update
                                    ( set -x; $sh_c 'sleep 3; apt-get install -y -q dirmngr' )
                            fi
    
                            (
                            set -x
                            for key_server in $key_servers ; do
                                    $sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break
                            done
                            $sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null"
                            $sh_c "mkdir -p /etc/apt/sources.list.d"
                            $sh_c "echo deb [arch=$(dpkg --print-architecture)] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list"
                            $sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine'
                            )
                            echo_docker_as_nonroot
                            exit 0
                            ;;
                    rancheros)
                            (
                            set -x
                            $sh_c "sleep 3; ros engine switch -f $(sudo ros engine list | grep ${docker_version} | head -n 1 | cut -d ' ' -f 2)"
                            )
                            exit 0
                            ;;
            esac
    
            # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
            cat >&2 <<-'EOF'
    
            Either your platform is not easily detectable or is not supported by this
            installer script.
            Please visit the following URL for more detailed installation instructions:
    
            https://docs.docker.com/engine/installation/
    
            EOF
            exit 1
    }
    
    # wrapped up in a function so that we have some protection against only getting
    # half the file during "curl | sh"
    # (nothing executes until this call, so a truncated download is a harmless no-op)
    do_install
    View Code

    2.5.7 docker/install_docker_1.12.6.sh

    #!/bin/sh
    set -e
    #
    # This script is meant for quick & easy install via:
    #   'curl -sSL https://get.docker.com/ | sh'
    # or:
    #   'wget -qO- https://get.docker.com/ | sh'
    #
    # For test builds (ie. release candidates):
    #   'curl -fsSL https://test.docker.com/ | sh'
    # or:
    #   'wget -qO- https://test.docker.com/ | sh'
    #
    # For experimental builds:
    #   'curl -fsSL https://experimental.docker.com/ | sh'
    # or:
    #   'wget -qO- https://experimental.docker.com/ | sh'
    #
    # Docker Maintainers:
    #   To update this script on https://get.docker.com,
    #   use hack/release.sh during a normal release,
    #   or the following one-liner for script hotfixes:
    #     aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index
    #
    
    # URL this copy of the script was served from; selects main/testing/experimental repo.
    url="https://get.docker.com/"
    # Docker engine version this script pins the install to.
    docker_version=1.12.6
    # Legacy Docker project package repositories (pre docker-ce split).
    apt_url="https://apt.dockerproject.org"
    yum_url="https://yum.dockerproject.org"
    # Fingerprint of the Docker release GPG key that signs the packages.
    gpg_fingerprint="58118E89F3A912897C070ADBF76221572C52609D"
    
    # Keyservers tried in order until one returns the release key.
    key_servers="
    ha.pool.sks-keyservers.net
    pgp.mit.edu
    keyserver.ubuntu.com
    "
    
    command_exists() {
            # Succeed when the named command(s) resolve via the shell's lookup.
            if command -v "$@" > /dev/null 2>&1; then
                    return 0
            fi
            return 1
    }
    
    echo_docker_as_nonroot() {
            # Best-effort 'docker version' smoke test (via the privileged $sh_c
            # prefix), then print a hint about adding the invoking user ($user)
            # to the "docker" group.  Reads globals: sh_c, user.
            if command_exists docker && [ -e /var/run/docker.sock ]; then
                    (
                            set -x
                            $sh_c 'docker version'
                    ) || true
            fi
            # Fall back to the literal placeholder "your-user" when run as root.
            your_user=your-user
            [ "$user" != 'root' ] && your_user="$user"
            # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
            cat <<-EOF
    
            If you would like to use Docker as a non-root user, you should now consider
            adding your user to the "docker" group with something like:
    
              sudo usermod -aG docker $your_user
    
            Remember that you will have to log out and back in for this to take effect!
    
            EOF
    }
    
    # Check if this is a forked Linux distro
    check_forked() {
            # Detect forked distros (e.g. Linux Mint): when 'lsb_release -u'
            # works, overwrite the globals lsb_dist/dist_version with the
            # upstream (parent) distro's id and codename so the matching
            # package repository is used.
    
            # Check for lsb_release command existence, it usually exists in forked distros
            if command_exists lsb_release; then
                    # Check if the `-u` option is supported
                    set +e
                    lsb_release -a -u > /dev/null 2>&1
                    lsb_release_exit_code=$?
                    set -e
    
                    # Check if the command has exited successfully, it means we're in a forked distro
                    if [ "$lsb_release_exit_code" = "0" ]; then
                            # Print info about current distro
                            cat <<-EOF
                            You're using '$lsb_dist' version '$dist_version'.
                            EOF
    
                            # Get the upstream release info
                            lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]')
                            dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]')
    
                            # Print info about upstream distro
                            cat <<-EOF
                            Upstream release is '$lsb_dist' version '$dist_version'.
                            EOF
                    else
                            if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
                                    # We're Debian and don't even know it!
                                    lsb_dist=debian
                                    # NOTE(review): 's//.*//' is an empty regex and will fail at
                                    # runtime; upstream used sed 's/\/.*//' and 's/\..*//' -- the
                                    # backslashes appear lost in copy/paste. TODO confirm.
                                    dist_version="$(cat /etc/debian_version | sed 's//.*//' | sed 's/..*//')"
                                    case "$dist_version" in
                                           9)
                                                   dist_version="stretch"
                                           ;;
                                            8|'Kali Linux 2')
                                                    dist_version="jessie"
                                            ;;
                                            7)
                                                    dist_version="wheezy"
                                            ;;
                                    esac
                            fi
                    fi
            fi
    }
    
    rpm_import_repository_key() {
            # Import a GPG key (given by fingerprint in $1) into the RPM keyring,
            # fetching it from the first responding server in the global
            # $key_servers list, via a throwaway gpg homedir.
            local key=$1; shift
            local tmpdir
            # Split declaration from assignment so a mktemp failure is not
            # masked by 'local' and still trips 'set -e'.
            tmpdir=$(mktemp -d)
            # Must be 700, not 600: a directory needs the execute bit for gpg to
            # create and read its keyring files inside the --homedir (gpg also
            # requires owner-only permissions on its homedir).
            chmod 700 "$tmpdir"
            for key_server in $key_servers ; do
                    gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break
            done
            # Verify the key actually landed before exporting it.
            gpg --homedir "$tmpdir" -k "$key" >/dev/null
            gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key
            rpm --import "$tmpdir"/repo.key
            rm -rf "$tmpdir"
    }
    
    semverParse() {
            # Break "$1" (a version string like "1.12.6") into the globals
            # major/minor/patch; pre-release or extra components after the
            # patch number are discarded.
            major=${1%%.*}
            minor=${1#"$major".}
            minor=${minor%%.*}
            patch=${1#"$major"."$minor".}
            patch=${patch%%[-.]*}
    }
    
    do_install() {
            case "$(uname -m)" in
                    *64)
                            ;;
                    armv6l|armv7l)
                            ;;
                    *)
                            cat >&2 <<-'EOF'
                            Error: you are not using a 64bit platform or a Raspberry Pi (armv6l/armv7l).
                            Docker currently only supports 64bit platforms or a Raspberry Pi (armv6l/armv7l).
                            EOF
                            exit 1
                            ;;
            esac
    
            if command_exists docker; then
                    version="$(docker -v | awk -F '[ ,]+' '{ print $3 }')"
                    MAJOR_W=1
                    MINOR_W=10
    
                    semverParse $version
    
                    shouldWarn=0
                    if [ $major -lt $MAJOR_W ]; then
                            shouldWarn=1
                    fi
    
                    if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then
                            shouldWarn=1
                    fi
    
                    cat >&2 <<-'EOF'
                            Warning: the "docker" command appears to already exist on this system.
    
                            If you already have Docker installed, this script can cause trouble, which is
                            why we're displaying this warning and provide the opportunity to cancel the
                            installation.
    
                            If you installed the current Docker package using this script and are using it
                    EOF
    
                    if [ $shouldWarn -eq 1 ]; then
                            cat >&2 <<-'EOF'
                            again to update Docker, we urge you to migrate your image store before upgrading
                            to v1.10+.
    
                            You can find instructions for this here:
                            https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
                            EOF
                    else
                            cat >&2 <<-'EOF'
                            again to update Docker, you can safely ignore this message.
                            EOF
                    fi
    
                    cat >&2 <<-'EOF'
    
                            You may press Ctrl+C now to abort this script.
                    EOF
                    ( set -x; sleep 20 )
            fi
    
            user="$(id -un 2>/dev/null || true)"
    
            sh_c='sh -c'
            if [ "$user" != 'root' ]; then
                    if command_exists sudo; then
                            sh_c='sudo -E sh -c'
                    elif command_exists su; then
                            sh_c='su -c'
                    else
                            cat >&2 <<-'EOF'
                            Error: this installer needs the ability to run commands as root.
                            We are unable to find either "sudo" or "su" available to make this happen.
                            EOF
                            exit 1
                    fi
            fi
    
            curl=''
            if command_exists curl; then
                    curl='curl -sSL'
            elif command_exists wget; then
                    curl='wget -qO-'
            elif command_exists busybox && busybox --list-modules | grep -q wget; then
                    curl='busybox wget -qO-'
            fi
    
            # check to see which repo they are trying to install from
            if [ -z "$repo" ]; then
                    repo='main'
                    if [ "https://test.docker.com/" = "$url" ]; then
                            repo='testing'
                    elif [ "https://experimental.docker.com/" = "$url" ]; then
                            repo='experimental'
                    fi
            fi
    
            # perform some very rudimentary platform detection
            lsb_dist=''
            dist_version=''
            if command_exists lsb_release; then
                    lsb_dist="$(lsb_release -si)"
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
                    lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
                    lsb_dist='debian'
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
                    lsb_dist='fedora'
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then
                    lsb_dist='oracleserver'
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then
                    lsb_dist='centos'
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then
                    lsb_dist='redhat'
            fi
            if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
                    lsb_dist="$(. /etc/os-release && echo "$ID")"
            fi
    
            lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
    
            # Special case redhatenterpriseserver
            if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then
                    # Set it to redhat, it will be changed to centos below anyways
                    lsb_dist='redhat'
            fi
    
            case "$lsb_dist" in
    
                    ubuntu)
                            if command_exists lsb_release; then
                                    dist_version="$(lsb_release --codename | cut -f2)"
                            fi
                            if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
                                    dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
                            fi
                    ;;
    
                    debian|raspbian)
                            dist_version="$(cat /etc/debian_version | sed 's//.*//' | sed 's/..*//')"
                            case "$dist_version" in
                                    8)
                                            dist_version="jessie"
                                    ;;
                                    7)
                                            dist_version="wheezy"
                                    ;;
                            esac
                    ;;
    
                    oracleserver)
                            # need to switch lsb_dist to match yum repo URL
                            lsb_dist="oraclelinux"
                            dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}
    " | sed 's//.*//' | sed 's/..*//' | sed 's/Server*//')"
                    ;;
    
                    fedora|centos|redhat)
                            dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}
    " | sed 's//.*//' | sed 's/..*//' | sed 's/Server*//' | sort | tail -1)"
                    ;;
    
                    *)
                            if command_exists lsb_release; then
                                    dist_version="$(lsb_release --codename | cut -f2)"
                            fi
                            if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
                                    dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
                            fi
                    ;;
    
    
            esac
    
            # Check if this is a forked Linux distro
            check_forked
    
            # Run setup for each distro accordingly
            case "$lsb_dist" in
                    amzn)
                            (
                            set -x
                            $sh_c 'sleep 3; yum -y -q install docker'
                            )
                            echo_docker_as_nonroot
                            exit 0
                            ;;
    
                    'opensuse project'|opensuse)
                            echo 'Going to perform the following operations:'
                            if [ "$repo" != 'main' ]; then
                                    echo '  * add repository obs://Virtualization:containers'
                            fi
                            echo '  * install Docker'
                            $sh_c 'echo "Press CTRL-C to abort"; sleep 3'
    
                            if [ "$repo" != 'main' ]; then
                                    # install experimental packages from OBS://Virtualization:containers
                                    (
                                            set -x
                                            zypper -n ar -f obs://Virtualization:containers Virtualization:containers
                                            rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
                                    )
                            fi
                            (
                                    set -x
                                    zypper -n install docker
                            )
                            echo_docker_as_nonroot
                            exit 0
                            ;;
                    'suse linux'|sle[sd])
                            echo 'Going to perform the following operations:'
                            if [ "$repo" != 'main' ]; then
                                    echo '  * add repository obs://Virtualization:containers'
                                    echo '  * install experimental Docker using packages NOT supported by SUSE'
                            else
                                    echo '  * add the "Containers" module'
                                    echo '  * install Docker using packages supported by SUSE'
                            fi
                            $sh_c 'echo "Press CTRL-C to abort"; sleep 3'
    
                            if [ "$repo" != 'main' ]; then
                                    # install experimental packages from OBS://Virtualization:containers
                                    echo >&2 'Warning: installing experimental packages from OBS, these packages are NOT supported by SUSE'
                                    (
                                            set -x
                                            zypper -n ar -f obs://Virtualization:containers/SLE_12 Virtualization:containers
                                            rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
                                    )
                            else
                                    # Add the containers module
                                    # Note well-1: the SLE machine must already be registered against SUSE Customer Center
                                    # Note well-2: the `-r ""` is required to workaround a known issue of SUSEConnect
                                    (
                                            set -x
                                            SUSEConnect -p sle-module-containers/12/x86_64 -r ""
                                    )
                            fi
                            (
                                    set -x
                                    zypper -n install docker
                            )
                            echo_docker_as_nonroot
                            exit 0
                            ;;
    
                    ubuntu|debian|raspbian)
                            export DEBIAN_FRONTEND=noninteractive
    
                            did_apt_get_update=
                            apt_get_update() {
                                    if [ -z "$did_apt_get_update" ]; then
                                            ( set -x; $sh_c 'sleep 3; apt-get update' )
                                            did_apt_get_update=1
                                    fi
                            }
    
                            if [ "$lsb_dist" = "raspbian" ]; then
                                    # Create Raspbian specific systemd drop-in file, use overlay by default
                                    ( set -x; $sh_c "mkdir -p /etc/systemd/system/docker.service.d" )
                                    ( set -x; $sh_c "echo '[Service]
    ExecStart=
    ExecStart=/usr/bin/dockerd --storage-driver overlay -H fd://' > /etc/systemd/system/docker.service.d/overlay.conf" )
                            else
                                    # aufs is preferred over devicemapper; try to ensure the driver is available.
                                    if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
                                            if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then
                                                    kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual"
    
                                                    apt_get_update
                                                    ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true
    
                                                    if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
                                                            echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)'
                                                            echo >&2 ' but we still have no AUFS.  Docker may not work. Proceeding anyways!'
                                                            ( set -x; sleep 10 )
                                                    fi
                                            else
                                                    echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual'
                                                    echo >&2 ' package.  We have no AUFS support.  Consider installing the packages'
                                                    echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.'
                                                    ( set -x; sleep 10 )
                                            fi
                                    fi
                            fi
    
                            # install apparmor utils if they're missing and apparmor is enabled in the kernel
                            # otherwise Docker will fail to start
                            if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
                                    if command -v apparmor_parser >/dev/null 2>&1; then
                                            echo 'apparmor is enabled in the kernel and apparmor utils were already installed'
                                    else
                                            echo 'apparmor is enabled in the kernel, but apparmor_parser is missing. Trying to install it..'
                                            apt_get_update
                                            ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' )
                                    fi
                            fi
    
                            if [ ! -e /usr/lib/apt/methods/https ]; then
                                    apt_get_update
                                    ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' )
                            fi
                            if [ -z "$curl" ]; then
                                    apt_get_update
                                    ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' )
                                    curl='curl -sSL'
                            fi
                            if [ ! -e /usr/bin/gpg ]; then
                                    apt_get_update
                                    ( set -x; $sh_c 'sleep 3; apt-get install -y -q gnupg2 || apt-get install -y -q gnupg' )
                            fi
                           if ! command -v gpg > /dev/null; then
                                   apt_get_update
                                   ( set -x; $sh_c 'sleep 3; apt-get install -y -q gnupg2 || apt-get install -y -q gnupg' )
                           fi
    
                           # dirmngr is a separate package in ubuntu yakkety; see https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1634464
                           if ! command -v dirmngr > /dev/null; then
                                   apt_get_update
                                   ( set -x; $sh_c 'sleep 3; apt-get install -y -q dirmngr' )
                           fi
    
                            (
                            set -x
                            for key_server in $key_servers ; do
                                    $sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break
                            done
                            $sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null"
                            $sh_c "mkdir -p /etc/apt/sources.list.d"
                            $sh_c "echo deb [arch=$(dpkg --print-architecture)] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list"
                            $sh_c "sleep 3; apt-get update"
                            $sh_c "apt-get install -y -q docker-engine=$(apt-cache madison docker-engine | grep ${docker_version} | head -n 1 | cut -d ' ' -f 3)"
                            )
                            echo_docker_as_nonroot
                            exit 0
                            ;;
    
                    fedora|centos|redhat|oraclelinux)
                            if [ "${lsb_dist}" = "redhat" ]; then
                                    # we use the centos repository for both redhat and centos releases
                                    lsb_dist='centos'
                            fi
                            $sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF
                            [docker-${repo}-repo]
                            name=Docker ${repo} Repository
                            baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version}
                            enabled=1
                            gpgcheck=1
                            gpgkey=${yum_url}/gpg
                            EOF
                            if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then
                                    (
                                            set -x
                                            $sh_c "sleep 3; dnf -y -q install docker-engine-${docker_version}"
                                    )
                            else
                                    (
                                            set -x
                                            $sh_c "sleep 3; yum -y -q install docker-engine-${docker_version}"
                                    )
                            fi
                            echo_docker_as_nonroot
                            exit 0
                            ;;
                    gentoo)
                            if [ "$url" = "https://test.docker.com/" ]; then
                                    # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
                                    cat >&2 <<-'EOF'
    
                                      You appear to be trying to install the latest nightly build in Gentoo.'
                                      The portage tree should contain the latest stable release of Docker, but'
                                      if you want something more recent, you can always use the live ebuild'
                                      provided in the "docker" overlay available via layman.  For more'
                                      instructions, please see the following URL:'
    
                                        https://github.com/tianon/docker-overlay#using-this-overlay'
    
                                      After adding the "docker" overlay, you should be able to:'
    
                                        emerge -av =app-emulation/docker-9999'
    
                                    EOF
                                    exit 1
                            fi
    
                            (
                                    set -x
                                    $sh_c 'sleep 3; emerge app-emulation/docker'
                            )
                            exit 0
                            ;;
                    rancheros)
                            (
                            set -x
                            $sh_c "sleep 3; ros engine switch -f $(sudo ros engine list | grep ${docker_version} | head -n 1 | cut -d ' ' -f 2)"
                            )
                            exit 0
                            ;;
            esac
    
            # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
            cat >&2 <<-'EOF'
    
              Either your platform is not easily detectable, is not supported by this
              installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have
              a package for Docker.  Please visit the following URL for more detailed
              installation instructions:
    
                https://docs.docker.com/engine/installation/
    
            EOF
            exit 1
    }
    
    # Top-level entry point.  All work lives inside do_install so that a
    # partially downloaded script (e.g. an interrupted "curl | sh") fails to
    # parse instead of executing half of the installer.
    do_install
    View Code

    2.5.8 docker/download_docker_compose.sh

    #!/bin/bash
    # Install docker-compose 1.25.0 into /usr/local/bin when it is not already
    # available on PATH.
    source ../util.sh
    if ! [ -x "$(command -v docker-compose)" ]; then
      sudo curl -L "https://github.com/docker/compose/releases/download/1.25.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
      # BUG FIX: the binary was written by root (sudo curl), so the chmod must
      # also run under sudo or it fails with "Operation not permitted".
      sudo chmod +x /usr/local/bin/docker-compose
    fi
    View Code

    2.5.9 docker/ignore_docker_auth.sh

    #!/bin/bash
    # Register the internal registries as insecure (plain-HTTP) in the Docker
    # daemon config, keeping a dated backup of any existing daemon.json, then
    # restart the daemon so the change takes effect.
    if [ -f /etc/docker/daemon.json ];then
       mv /etc/docker/daemon.json /etc/docker/daemon.json.`date +'%Y-%m-%d'`
    fi
    mkdir -p /etc/docker/
    # BUG FIX: the original nested double quotes were consumed by the shell,
    # which wrote invalid JSON (all inner quotes stripped).  Single-quoting the
    # payload preserves the double quotes required by daemon.json.
    echo '{"insecure-registries": ["nexus.calix.ai:8083","docker.calix.local:18080","docker.calix.local:18081"]}'> /etc/docker/daemon.json
    sudo systemctl daemon-reload
    sudo systemctl restart docker
    View Code

    2.5.10 docker/create_meteor_user.sh

    #!/bin/bash
    # Create the "meteor" user with password "meteor", grant passwordless sudo,
    # and add it to the docker group.  Ubuntu's passwd has no --stdin, so the
    # password is fed twice on stdin there.
    useradd -s /bin/bash -m meteor
    if [ `cat /etc/issue|grep Ubuntu|wc -l` -eq 1 ];then
       echo -e "meteor
meteor" | passwd meteor
    else
       echo "meteor" | passwd --stdin meteor
    fi
    # BUG FIX: in "sudo echo ... >> /etc/sudoers" the redirection is performed
    # by the *unprivileged* shell, so the append fails.  Route the write
    # through "sudo tee -a" instead.
    echo "meteor ALL=(ALL:ALL) NOPASSWD:ALL" | sudo tee -a /etc/sudoers >/dev/null
    sudo usermod -aG docker meteor
    View Code

    2.5.11 acs_setup/install.sh

    #!/bin/bash
    # Resolve this script's own directory so it can be launched from anywhere,
    # then load the shared environment and helper functions.
    # FIX: quote the expansions so paths containing spaces do not word-split.
    BASEDIR=$(cd "$(dirname "$0")" && pwd)
    cd "$BASEDIR" >/dev/null
    ENVDIR=$(cd .. && pwd)
    source ../env.sh
    source ../util.sh
    
    download_software()
    {
       # Download the helper scripts and the 3rd-party toolchain (JDK 7, Maven,
       # redis, haproxy, stun server, vert.x), build the native pieces, and
       # register the environment in ~/.bashrc.
       # Idempotent: skips all work when ~/3rd_party already exists.
       if [ -d ~/3rd_party ];then
          echo -e "${GREEN}3rd_party had already installed , ignore $NC"
          return
       fi
       cd ~ >/dev/null
       wget "$public_software_address/ccng_acs/keystore.tar.gz"
       wget "$public_software_address/ccng_acs/build.sh"
       wget "$public_software_address/ccng_acs/start_redis.sh"
       wget "$public_software_address/ccng_acs/cicd.sh"
       wget "$public_software_address/ccng_acs/server.sh"
       wget "$public_software_address/ccng_acs/restart.sh"
       wget "$public_software_address/ccng_acs/libs.tar.gz"
       tar -zxf libs.tar.gz
       tar -zxf keystore.tar.gz
       chmod +x *.sh
       mkdir -p ~/3rd_party
       cd ~/3rd_party >/dev/null
       wget "$public_software_address/3rd_party/apache-maven-3.5.4-bin.tar.gz"
       wget "$public_software_address/3rd_party/haproxy-1.8.8.tar.gz"
       wget "$public_software_address/3rd_party/jdk-7u80-linux-x64.tar.gz"
       wget "$public_software_address/3rd_party/redis-stable.tar.gz"
       wget "$public_software_address/3rd_party/vert.x-2.1.6.tar.gz"
       wget "$public_software_address/3rd_party/stunserver-1.2.7.tgz"
       # FIX: name the archive explicitly -- "tar -zxf *.tgz" would treat any
       # extra glob matches as member names instead of archives.
       tar -zxf stunserver-1.2.7.tgz
       gunzip *.gz
       tar -xf apache-maven-3.5.4-bin.tar
       tar -xf haproxy-1.8.8.tar
       tar -xf jdk-7u80-linux-x64.tar
       tar -xf redis-stable.tar
       tar -xf vert.x-2.1.6.tar
       # build redis
       cd ~/3rd_party/redis-stable
       make
       make test
       cd ~/3rd_party/haproxy-1.8.8
       make TARGET=linux2628 USE_ZLIB=1 USE_OPENSSL=1
       cd ~/3rd_party/stunserver
       make
       # Skip the ~/.bashrc setup when a jdk-based JAVA_HOME is already set.
       if echo "$JAVA_HOME" | grep -q jdk; then
          return
       fi
       # BUG FIX: the original echo lines used double quotes, so $JAVA_HOME,
       # $PATH, $M2 etc. were expanded *now* (mostly to empty strings) instead
       # of being written literally for login shells to expand later, and the
       # nested quotes around MAVEN_OPTS and the aliases were eaten by the
       # shell.  A single-quoted heredoc appends every line verbatim.
       cat >> ~/.bashrc <<'EOF'
export JAVA_HOME=/home/sxacc/3rd_party/jdk1.7.0_80
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$JAVA_HOME/bin:$PATH
export M2_HOME=/home/sxacc/3rd_party/apache-maven-3.5.4
export M2=$M2_HOME/bin
export MAVEN_OPTS="-Xms256m -Xmx512m"
export PATH=$M2:$PATH:$HOME/bin
export VERTX_HOME=~/3rd_party/vert.x-2.1.6
export PATH=$VERTX_HOME/bin:$PATH
export REDIS_BUILD=~/3rd_party/redis-stable
export PATH=$REDIS_BUILD/src:$PATH
export PATH="$PATH:~/3rd_party/haproxy-1.8.8"
alias cpe="tail -200f /home/sxacc/logs/cpe-server/current/cpe-server.log"
alias acs="tail -200f /home/sxacc/logs/acs-server/current/acs-server.log"
EOF
       source ~/.bashrc
    }
    
    download_code()
    {
       # Fetch the mgit helper and clone the three ACS repositories into
       # ~/cc_code.  Idempotent: skips when the checkout directory exists.
       if [ -d ~/cc_code ];then
         echo -e "${GREEN}cc_code had downloaded , ignore $NC"
         return
       fi
       mkdir -p ~/cc_code
       cd ~/cc_code >/dev/null
       wget "$public_software_address/ccng_acs/mgit.sh"
       chmod +x *.sh
       # FIX: quote expansions; clone each repo from the internal Stash server
       # in one loop instead of three copy-pasted lines.
       for repo in vertx-common.git task-mgmt ccng-acs; do
          git clone "ssh://git@stash.calix.local:7999/ccl/$repo"
       done
    }
    
    build_code()
    {
       # Run the downloaded build.sh from the home directory with the freshly
       # written ~/.bashrc environment loaded.
       cd "$HOME"
       source "$HOME/.bashrc"
       "$HOME/build.sh"
    }
    
    download_conf()
    {
       # Download the configuration bundle and render every *.tmp template into
       # its final name through envsubst.sh.  Idempotent: skips when ~/conf
       # already exists.
       if [ -d ~/conf ];then
          echo -e "${GREEN}conf folder had already exists, ignore $NC"
          return
       fi
       cd ~ >/dev/null
       wget "$public_software_address/ccng_acs/conf.tar.gz"
       tar -zxf conf.tar.gz
       rm -rf *.gz
       cd conf
       cp "$ENVDIR/envsubst.sh" .
       # BUG FIX: iterate the glob directly instead of parsing `ls`, which
       # breaks on whitespace/special characters, and quote the expansions.
       for s in *.tmp
       do
         ./envsubst.sh -e "${ENVDIR}/env.sh" "$s" > "${s//.tmp}"
       done
       rm envsubst.sh
       rm *.tmp
       chmod +x *.sh
    }
    
    start_single_process()
    {
       # Launch a service unless a matching process is already running.
       #   $1 - pattern matched against the `ps -ef` listing
       #   $2 - command line used to launch the service
       echo "Begin Start $1 ..."
       # BUG FIX: quote "$1" in the greps so multi-word or glob-like patterns
       # do not word-split/expand.
       if [ `ps -ef|grep "$1"|grep -v grep |wc -l` -gt 0 ];then
          echo -e "${GREEN}the process $1 had started, `ps -ef|grep "$1"|grep -v grep` $NC"
          return
       fi
       # Intentionally unquoted: $2 may carry arguments for the launcher.
       $2
    }
    
    start_process()
    {
        # Bring up each runtime dependency and finally the ACS server itself;
        # every pattern/launcher pair is fed to start_single_process in order.
        while read -r pattern launcher; do
            start_single_process "$pattern" "$launcher"
        done <<'EOF'
redis /home/sxacc/start_redis.sh
haproxy /home/sxacc/scripts/haproxy-monitor.sh
stun /home/sxacc/scripts/stun-monitor.sh
acs /home/sxacc/restart.sh
EOF
    }
    
    initial_acs_org50()
    {
       # Render the org-50 template and POST it to the ACS management API,
       # then fetch the record back so the operator can eyeball the result.
       local org_url="http://${acs_host}:8081/cc/organization/50"
       cd "$BASEDIR" >/dev/null
       ../envsubst.sh -e ../env.sh org_50.json.tmp >./org_50.json
       curl -s -X POST -d @org_50.json --url "$org_url"
       echo "Check ACS Org Info"
       curl -s "$org_url"
    }
    
    
    main()
    {
      # Driver: each stage funnels through execute_command (util.sh) so the
      # operator gets a uniform status banner per step.
      mkdir -p ~/logs
      execute_command "check_user sxacc" "Check User [sxacc]"
      execute_command "download_software" "Download 3rd party software and setup"
      # FIX: banner typo "initilize" -> "initialize".
      execute_command "download_conf" "Download configure files and initialize"
      execute_command "download_code" "Download Code"
      execute_command "build_code" "Build CodeBase"
      execute_command "start_process" "Start Process ..."
      execute_command "initial_acs_org50" "Initial Org 50 ACS configure"
    }
    main
    View Code

    2.5.12 acs_setup/uninstall.sh

    #!/bin/bash
    # Resolve this script's directory and load shared env/helpers.
    # FIX: quote the expansions so paths containing spaces do not word-split.
    BASEDIR=$(cd "$(dirname "$0")" && pwd)
    cd "$BASEDIR" >/dev/null
    source ../env.sh
    source ../util.sh
    
    delete_software()
    {
       # Wipe everything install.sh placed in the sxacc home directory:
       # toolchain, checkouts, keystore, config, libs, helper scripts and
       # downloaded archives.
       local d
       for d in 3rd_party cc_code keystore conf libs; do
          rm -rf ~/"$d"
       done
       rm -rf ~/*.sh
       rm -rf ~/*.gz
    }
    
    stop_process()
    {
       # Stop the managed Java servers through the helper script, then
       # hard-kill the remaining infrastructure daemons.
       cd ~/scripts>/dev/null
       ./server.sh stop cpe
       ./server.sh stop acs
       # NOTE(review): SIGKILL as the first resort skips graceful shutdown,
       # and these substring greps also match any unrelated process whose
       # command line contains "haproxy"/"redis"/"stun" -- consider pkill with
       # exact names and SIGTERM first; confirm before changing.
       ps -ef|grep haproxy|grep -v grep |awk '{print $2}'|xargs kill -9
       ps -ef|grep redis|grep -v grep |awk '{print $2}'|xargs kill -9
       ps -ef|grep stun|grep -v grep |awk '{print $2}'|xargs kill -9
    
    }
    main()
    {
      # Uninstall driver: stop everything, then delete the installation.
      execute_command "check_user sxacc" "Check User [sxacc]"
      execute_command "stop_process" "STOP PROCESS ..."
      # BUG FIX: the banner previously said "Download 3rd party software and
      # setup" -- this step removes the installation.
      execute_command "delete_software" "Delete 3rd party software and data"
    
    }
    main
    View Code

    2.5.13 csc_setup/install.sh

    #!/bin/bash
    # Resolve this script's own directory so it can be launched from anywhere,
    # then load the shared environment and helper functions.
    # FIX: quote the expansions so paths containing spaces do not word-split.
    BASEDIR=$(cd "$(dirname "$0")" && pwd)
    cd "$BASEDIR" >/dev/null
    ENVDIR=$(cd .. && pwd)
    source ../env.sh
    source ../util.sh
    
    install_meteor()
    {
       # Install Meteor via its official curl|sh installer, tunnelled through
       # the corporate proxy, when check_command (util.sh) returns 0.
       check_command meteor
       retval=$?
       if [ "$retval" -eq 0 ];then
          export http_proxy=http://172.29.1.8:3128
          export https_proxy=http://172.29.1.8:3128
          curl https://install.meteor.com/ | sh
          unset http_proxy https_proxy
       fi
    }
    
    install_docker()
    {
       # Install docker engine 1.12.6 and docker-compose (via the sibling
       # helper scripts) when check_command (util.sh) returns 0 for each,
       # and grant the meteor user docker access.
       cd "$ENVDIR/docker" >/dev/null
       check_command docker
       retval=$?
       if [ "$retval" -eq 0 ];then
         sudo ./install_docker_1.12.6.sh
         sudo usermod -aG docker meteor
       fi
       check_command docker-compose
       retval=$?
       if [ "$retval" -eq 0 ];then
          sudo ./download_docker_compose.sh
       fi
    }
    
    download_software()
    {
       # Install docker/meteor prerequisites, then fetch and unpack the JDK 8
       # and Maven toolchain into ~/3rd_party and register them in ~/.bashrc.
       install_docker
       install_meteor
       cd ~ >/dev/null
       if [ -d ~/3rd_party ];then
          echo -e "${GREEN}3rd_party had already installed , ignore $NC"
          return
       fi
       mkdir -p ~/3rd_party
       cd ~/3rd_party >/dev/null
       wget "$public_software_address/3rd_party/apache-maven-3.5.4-bin.tar.gz"
       wget "$public_software_address/3rd_party/jdk-8u171-linux-x64.tar.gz"
       tar -zxf apache-maven-3.5.4-bin.tar.gz
       tar -zxf jdk-8u171-linux-x64.tar.gz
       # Skip the ~/.bashrc setup when a jdk-based JAVA_HOME is already set.
       if echo "$JAVA_HOME" | grep -q jdk; then
          return
       fi
       # BUG FIX: the original echo lines used double quotes, so $JAVA_HOME,
       # $PATH and $M2 were expanded *now* (mostly empty) instead of written
       # literally, and the nested quotes around MAVEN_OPTS were eaten by the
       # shell.  A single-quoted heredoc appends every line verbatim.
       cat >> ~/.bashrc <<'EOF'
export JAVA_HOME=/home/meteor/3rd_party/jdk1.8.0_171
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$JAVA_HOME/bin:$PATH
export M2_HOME=/home/meteor/3rd_party/apache-maven-3.5.4
export M2=$M2_HOME/bin
export MAVEN_OPTS="-Xms256m -Xmx512m"
export PATH=$M2:$PATH:$HOME/bin
EOF
       source ~/.bashrc
    }
    
    download_code()
    {
       # Pull the csc/meteor repositories via the git_* helper scripts and
       # prime the meteor app's npm dependencies through the corporate proxy.
       cd "$BASEDIR" >/dev/null
       if [ -d ~/cloud-coper ];then
         echo -e "${GREEN} code had downloaded , ignore $NC"
         return
       fi
       rm -rf ~/git_*.sh
       cp git_*.sh ~
       cd ~ >/dev/null
       ./git_csc.sh
       ./git_meteor.sh
       cd ~/cloud-app-ui/
       export http_proxy=http://172.29.1.8:3128
       export https_proxy=http://172.29.1.8:3128
       meteor npm install
       unset http_proxy https_proxy
    }
    
    build_code()
    {
       # Stage build.sh in the home directory and run it twice.
       # NOTE(review): the double invocation is preserved from the original --
       # presumably the first run can fail or only partially build; confirm
       # before collapsing it to a single call.
       cd "$BASEDIR" >/dev/null
       cp build.sh ~
       cd ~ >/dev/null
       ./build.sh
       ./build.sh
    }
    
    start_process()
    {
       # Rebuild ~/scripts from the repo templates, substitute the environment
       # placeholders in-place with sed, then start the docker stacks and the
       # meteor app through the corporate proxy.
       rm -rf ${HOME}/scripts
       create_folder "${HOME}/scripts"
       create_folder "${HOME}/export_data"
       create_folder "${HOME}/conf"
       ./init_parameter.sh
       cp $BASEDIR/meteor_restart.sh ${HOME}/scripts
       sed -i "s|mongodb|${cloud_mongo_url}|g" ${HOME}/scripts/meteor_restart.sh
       sed -i "s|root_host|${cloud_api_host}|g" ${HOME}/scripts/meteor_restart.sh
       cp $BASEDIR/admin.json ${HOME}/scripts/
       cp $BASEDIR/create_org_user.sh ${HOME}/scripts/
       cp $BASEDIR/docker_restart.sh ${HOME}/scripts/
       # NOTE(review): these replacements are order-sensitive -- the
       # "cloud_postgres_user" pattern is a prefix of "cloud_postgres_username"
       # and would mangle any remaining occurrence of the longer token; it
       # only works because the longer placeholders are replaced first.
       sed -i "s|cloud_postgres_password|$cloud_postgres_password|g"  ${HOME}/scripts/create_org_user.sh
       sed -i "s|cloud_postgres_host|$postgres_ip|g"  ${HOME}/scripts/create_org_user.sh
       sed -i "s|cloud_postgres_db|$postgres_db|g"  ${HOME}/scripts/create_org_user.sh
       sed -i "s|cloud_postgres_user|$cloud_postgres_username|g"  ${HOME}/scripts/create_org_user.sh
       sed -i "s|cloud_api_host|$cloud_api_host|g" ${HOME}/scripts/create_org_user.sh
       chmod +x ${HOME}/scripts/*.sh
       cd ${HOME}/scripts >/dev/null
       # The docker pulls and meteor start need the outbound proxy.
       export http_proxy=http://172.29.1.8:3128
       export https_proxy=http://172.29.1.8:3128
       ./docker_restart.sh
       ./meteor_restart.sh start
       unset http_proxy
       unset https_proxy
    
    }
    
    
    main()
    {
      # Drive each installation stage through execute_command (util.sh) so the
      # operator sees a uniform status banner per stage.
      while IFS='|' read -r cmd desc; do
        execute_command "$cmd" "$desc"
      done <<'EOF'
check_user meteor|Check User [meteor]
download_software|Download 3rd party software and setup
download_code|Download Code
build_code|Build CodeBase
start_process|Start Process ...
EOF
    }
    
    main
    View Code

    2.5.14 csc_setup/uninstall.sh

    #!/bin/bash
    # Resolve this script's directory and load shared env/helpers.
    # FIX: quote the expansions so paths containing spaces do not word-split.
    BASEDIR=$(cd "$(dirname "$0")" && pwd)
    cd "$BASEDIR" >/dev/null
    source ../env.sh
    source ../util.sh
    
    delete_software()
    {
       # Remove everything the csc installer placed in the meteor home dir:
       # toolchain, helper scripts, downloaded archives, checkouts and config.
       local target
       for target in 3rd_party scripts conf; do
          rm -rf ~/"$target"
       done
       rm -rf ~/*.sh ~/*.gz
       rm -rf ~/cloud-* ~/cmdctr* ~/ccng*
    
    }
    
    stop_process()
    {
       # NOTE(review): this body appears copied verbatim from the ACS
       # uninstaller -- it stops cpe/acs via ~/scripts/server.sh, which the
       # csc setup does not install, and its only call site in main() is
       # commented out.  Verify intent before relying on it.
       cd ~/scripts>/dev/null
       ./server.sh stop cpe
       ./server.sh stop acs
       # SIGKILL on substring matches: also hits unrelated processes whose
       # command line contains these words.
       ps -ef|grep haproxy|grep -v grep |awk '{print $2}'|xargs kill -9
       ps -ef|grep redis|grep -v grep |awk '{print $2}'|xargs kill -9
       ps -ef|grep stun|grep -v grep |awk '{print $2}'|xargs kill -9
    
    }
    main()
    {
      # Uninstall driver for the csc node.
      execute_command "check_user meteor" "Check User [meteor]"
      #execute_command "stop_process" "STOP PROCESS ..."
      # BUG FIX: the banner previously said "Download 3rd party software and
      # setup" -- this step removes the installation.
      execute_command "delete_software" "Delete 3rd party software and data"
    
    }
    main
    View Code

    2.5.15 csc_setup/docker_restart.sh

    #!/bin/bash
    # Restart the csc docker-compose stacks.  With an argument, only the stack
    # whose compose file name matches is restarted; with no argument all four
    # stacks are bounced in dependency order.
    cd ~/scripts >/dev/null
    module=$1
    
    # restart_process FILE -- tear the compose stack down, then bring it back
    # up detached.
    restart_process()
    {
      echo "Begin Restart $1"
      docker-compose -f "$1" down
      docker-compose -f "$1" up -d
    }
    
    for f in traefik.yaml org.yaml golden_service.yaml csc_service.yaml
    do
      # FIX: the original loop compared "$1" while the rest of the script used
      # $module; use the single captured variable consistently (the separate
      # flg indicator becomes unnecessary).
      if [ -z "$module" ] || [ "$module" = "$f" ]; then
         restart_process "$f"
      fi
    done
    View Code

    2.5.16 csc_setup/traefik.yaml.tmp

    # Traefik v1 reverse proxy fronting all csc services; the other compose
    # stacks attach to it via their "traefik.*" container labels.
    traefik:
      image: traefik:v1.1.2
      # --web enables the dashboard (served on 8080); --docker watches
      # container labels for routing rules.
      command: --web --docker --docker.domain=docker.localhost --logLevel=DEBUG
      ports:
        - "80:80"
        - "8080:8080"
      volumes:
        # Docker socket so traefik can discover containers; mounting /dev/null
        # over traefik.toml disables any file-based configuration.
        - /var/run/docker.sock:/var/run/docker.sock
        - /dev/null:/traefik.toml
    View Code

    2.5.17 csc_setup/golden_service.yaml.tmp

    # Subscriber indexer: consumes Kafka and writes into Elasticsearch
    # (SERVER_MODE=index).  Routed by traefik under /subscriber/index.
    # NOTE(review): in compose "environment" list entries the quotes in
    # values like INDEX_ENABLED="true" become part of the value (literal
    # "true" including quotes) -- confirm the consumer strips them.
    cloud-subscriber-sync:
      image: cloud-subscriber:s-19.4-0.0.1-54
      ports:
        - "13424:3424"
      environment:
        - KAFKA_SERVERS=${kafka_bootstrap_server}:9092
        - ES_HOSTS=${es_host}
        - SERVER_MODE=index
        - DB_URL=jdbc:postgresql://${postgres_ip}:5432/${postgres_db}
        - DB_USERNAME=${cloud_postgres_username}
        - DB_PASSWORD=${cloud_postgres_password}
        - INDEX_ENABLED="true"
        - ES_INDEX_PREFIX=onecloud-${env_name}
      labels:
        - "traefik.backend=cloud-subscriber-sync"
        - "traefik.port=3424"
        - "traefik.frontend.rule=PathPrefixStrip:/subscriber/index"
    
    # Same image in search mode: serves subscriber queries from Elasticsearch.
    # Routed by traefik under /subscriber/search.
    cloud-search-api:
      image: cloud-subscriber:s-19.4-0.0.1-54
      ports:
        - "23424:3424"
      environment:
        - JAVA_OPTIONS=-server -Xmx1g -XX:+UseG1GC -Djava.net.preferIPv4Stack=true
        - ES_HOSTS=${es_host}
        - SERVER_MODE=search
        - METRICS="true"
        - DB_URL=jdbc:postgresql://${postgres_ip}:5432/${postgres_db}
        - DB_USERNAME=${cloud_postgres_username}
        - DB_PASSWORD=${cloud_postgres_password}
        - SEARCH_ENABLED="true"
        - KAFKA_SERVERS=${kafka_bootstrap_server}:9092
        - ES_INDEX_PREFIX=onecloud-${env_name}
      labels:
        - "traefik.backend=cloud-search-api"
        - "traefik.port=3424"
        - "traefik.frontend.rule=PathPrefixStrip:/subscriber/search"
    
    # Report server: reads Redshift, Postgres and Greenplum; exported files
    # land in the host's ~/export_data.  Routed under /report.
    report:
      image: cloud-report-server:s-19.4-0.0.1-33
      ports:
        - "18080:8080"
      volumes:
        - $HOME/export_data:/marketing-cloud/data/export-files/
      environment:
        - CCPlus_DB_MAX_POOL_SIZE=10
        - CCPlus_DB_URL=jdbc:redshift://${wifi_redshift_host}:5439/${wifi_redshift_db}
        - CCPlus_DB_USERNAME=${wifi_redshift_username}
        - CCPlus_DB_PASSWORD=${wifi_redshift_password}
        - CCPlus_pg_DB_MAX_POOL_SIZE=10
        - CCPlus_pg_DB_URL=jdbc:postgresql://${postgres_ip}:5432/${postgres_db}
        - CCPlus_pg_DB_USERNAME=${cloud_postgres_username}
        - CCPlus_pg_DB_PASSWORD=${cloud_postgres_password}
        - FA_greenplum_DB_URL=jdbc:postgresql://${greenplum_host}:${greenplum_port}/${greenplum_db}
        - FA_greenplum_DB_USERNAME=${greenplum_username}
        - FA_greenplum_DB_PASSWORD=${greenplum_password}
      labels:
        - "traefik.backend=report"
        - "traefik.port=8080"
        - "traefik.frontend.rule=PathPrefix:/report"
    
    
    # Portable-subscriber microservice: bridges Greenplum/Postgres/Kafka and
    # the ACS API.  Routed under /subscribers.
    portable-subscriber:
      image: cloud-portable-subscriber-ms:s-19.4-0.0.1-72
      environment:
        - SERVER_TIME_ZONE=UTC
        - GREENPLUM_URL=jdbc:postgresql://${greenplum_host}:${greenplum_port}/${greenplum_db}
        - GREENPLUM_USERNAME=${greenplum_username}
        - GREENPLUM_PASSWORD=${greenplum_password}
        - POSTGRES_URL=jdbc:postgresql://${postgres_ip}:5432/${postgres_db}
        - POSTGRES_USERNAME=${cloud_postgres_username}
        - POSTGRES_PASSWORD=${cloud_postgres_password}
        - KAFKA_BOOTSTRAP_SERVER=${kafka_bootstrap_server}:9092
        - KAFKA_CLIENT_ID=${cloud_api_host}
        - ACS_CC_API_URL=http://${acs_host}:8081
      labels:
        - "traefik.backend=portable-subscriber"
        - "traefik.port=9112"
        - "traefik.frontend.rule=PathPrefix:/subscribers"
    View Code

    2.5.18 csc_setup/org.yaml.tmp

    # Central logging service, routed by Traefik at /logger (port 8076).
    cloud-logger:
      image: cloud-logger:s-19.4-0.0.1-27
      environment:
        # NOTE(review): with docker-compose list syntax the double quotes stay
        # part of the value (LOGSERVER literally becomes "logstash", quotes
        # included) -- confirm the application strips them.
        - LOGSERVER="logstash"
        # NOTE(review): port 80 for a logstash TCP input is unusual -- verify.
        - LOGSERVER_TCPPORT=80
        - ESSERVER="${es_host}"
        - ESPORT=9200
        # FIXME(review): hard-coded host and admin credentials committed below;
        # these should come from template variables / a secrets store.
        - JBOSS_HOST="http://cdc-kylin.calix.local:8080"
        - JBOSS_AUTH="admin@calix.com:admin"
      labels:
        - "traefik.backend=cloud-logger"
        - "traefik.port=8076"
        - "traefik.frontend.rule=PathPrefix:/logger"
    
    # Authorization service; published directly on host port 28849 and also
    # routed by Traefik at /authz.
    cloud-authz:
      image: authz:s-19.4-0.0.1-48
      ports:
        - "28849:28849"
      environment:
        - JAVA_OPTIONS=-server -Xmx1G -XX:+UseG1GC -Djava.net.preferIPv4Stack=true
        - LOG_LAYOUT=json
        # Operational Postgres connection.
        - PGSQL_JDBC_PASSWORD=${cloud_postgres_password}
        - PGSQL_JDBC_URL=jdbc:postgresql://${postgres_ip}:5432/${postgres_db}
        - PGSQL_JDBC_USERNAME=${cloud_postgres_username}
      volumes:
        # Host-side properties file mounted into the container.
        - ./command-center.properties:/cloud/sync/command-center.properties
      labels:
        - "traefik.backend=cloud-authz"
        - "traefik.port=28849"
        - "traefik.frontend.rule=PathPrefix:/authz"
    
    # Organization / entitlement management service (Traefik port 8080).
    cloud-org:
      image: cloud-org-management:s-19.4-0.0.1-25
      environment:
        - SPRING_PROFILES_ACTIVE=prod
        # Operational Postgres connection.
        - DATASOURCE_URL=jdbc:postgresql://${postgres_ip}:5432/${postgres_db}
        - DATASOURCE_SCHEMA=public
        - DATASOURCE_USERNAME=${cloud_postgres_username}
        - DATASOURCE_PASSWORD=${cloud_postgres_password}
        # "xx" values below are template placeholders to be filled per deployment.
        - SPID_SOURCE_STRING=xx
        - SPID_STRING_LENGTH=10
        - SPID_SYNC=false
        - ENTITLEMENT_EXPIRY_GRACE_DAYS=45
        - ENTITLEMENT_DEFAULT_EXPIRY_GRACE_DAYS=30
        # ACS endpoints (different ports expose different ACS interfaces).
        - ACS_RESOURCE=http://${acs_host}:8081
        - ACS_PATH=/cc/organization/
        - CLOUD_ORG_EXTERNALACSNBIAPIURL=http://${acs_host}:8082
        - CLOUD_ORG_ACSHTTPSPORT=8443
        - CLOUD_ORG_APIHTTPSPORT=8444
        - CLOUD_ORG_ACSIPV6URL=https://${acs_host}:8443
        - CLOUD_ORG_CPEACSURL=http://${acs_host}:8080
        # Generated-credential settings for ACS and API users.
        - ACS_CRED_CHARS=xx
        - ACS_USER_USERNAME_LENGTH=5
        - ACS_USER_PASSWORD_LENGTH=15
        - API_CLIENT_USERNAME_LENGTH=5
        - API_CLIENT_PASSWORD_LENGTH=15
        - ACS_USER_USERNAME_PREFIX=acs-user-
        - API_CLIENT_USERNAME_PREFIX=api-user-
        # Mobile-app id-lookup service endpoints.
        - MAP_IDLOOKUP_AUTH_TOKEN=xx==
        - MAP_IDLOOKUP_RESOURCE=https://xx/idlookup
        - MAP_IDLOOKUP_SPID_PATH=/spId
        - MAP_IDLOOKUP_ORGID_PATH=/orgId
        - MAP_IDLOOKUP_REMOVE_SPID_PATH=/remove
        - MAP_IDLOOKUP_ADD_ORGID_SPID_PATH=/add
        # HTTP client pool / timeout tuning (timeouts in milliseconds).
        - MAX_TOTAL_CONNECTIONS=100
        - DEFAULT_MAX_TOTAL_CONNECTIONS=20
        - CONN_REQ_TIMEOUT=300000
        - SOCKET_TIMEOUT=300000
        # Salesforce sync (disabled here; credentials are placeholders).
        # NOTE(review): "SALESFORCE_CLIENDID" (sic) is the variable name the
        # application reads -- do not "fix" the spelling without changing the app.
        - SALESFORCE_SYNC=false
        - SALESFORCE_HOST=https://login.salesforce.com
        - SALESFORCE_CLIENDID=3MVG9VmVOCGHKYBRjY43Vs1YhJUmdEOSixCqtoU7ZIRakbNRCelwOU5JHkFWr6zWftHaECXvFjlLkTO7BDwMS
        - SALESFORCE_CLIENTSECRET=xx
        - SALESFORCE_USERNAME=xxx
        - SALESFORCE_PASSWORD=xxx
        - SALESFORCE_PARTNUMBER_CSC_CAF=130-00423
        - SALESFORCE_ORG_SYNC_UPTO_DAYS=1
        - SALESFORCE_ENTITLEMENT_SYNC_UPTO_DAYS=1
        - SALESFORCE_PARTNUMBER_CMC=130-00383,130-00383-,130-00414
        - SALESFORCE_PARTNUMBER_CMC_PRO=130-00391
        - SALESFORCE_PARTNUMBER_CMC_PLATFORM=130-00390
        - SALESFORCE_PARTNUMBER_CSC=130-00394,130-00415
        - SALESFORCE_PARTNUMBER_CSC_BASIC=130-00410
        - SALESFORCE_ENTITLEMENT_CRON_CONF="0 0/20 * * * ?"
        - SALESFORCE_ORG_CRON_CONF="0 0/15 * * * ?"
        - SALESFORCE_INCLUDED_COUNTRIES=""
        - SALESFORCE_ERRORS_TO_BE_NOTIFIED=false
        - SALESFORCE_MAIL_SMTP_HOST=""
        - SALESFORCE_MAIL_SMTP_PORT=""
        - SALESFORCE_MAIL_USERNAME=""
        - SALESFORCE_MAIL_PASSWORD=""
        - SALESFORCE_MAIL_TO=""
        - AUTHZ_URL=http://${cloud_api_host}/authz
        - AUTHZ_PROV_URL=/provision?update
        - CLOUD_AUDIT_SERVICE_URL=http://${cloud_api_host}/logger/auditlog
      labels:
        - "traefik.backend=cloud-org"
        - "traefik.port=8080"
        # Docker labels form a key/value map, so two separate
        # "traefik.frontend.rule" entries collide and the second silently
        # overrides the first (/organizations was never routed). Traefik v1
        # accepts multiple comma-separated values in a single PathPrefix rule.
        - "traefik.frontend.rule=PathPrefix:/organizations,/entitlements"
    View Code

    2.5.19 csc_setup/csc_service.yaml.tmp

    # Cloud operations ("coper") service, routed by Traefik at /coper.
    coper:
      image: cloud-coper:s-19.4-0.0.1-35
      environment:
        - JAVA_OPTIONS=-server -Xmx1G -XX:+UseG1GC -Djava.net.preferIPv4Stack=true -Dvertx.options.maxWorkerExecuteTime=300000000000
        - WEB_PATH_PREFIX=/coper
        - ACS_URL=http://${acs_host}:8081
        - HTTP_POOL_SIZE=500
        - LOG_LAYOUT=json
        # Operational Postgres connection.
        - PGSQL_JDBC_URL=jdbc:postgresql://${postgres_ip}:5432/${postgres_db}
        - PGSQL_JDBC_USERNAME=${cloud_postgres_username}
        - PGSQL_JDBC_PASSWORD=${cloud_postgres_password}
        # Redis via Sentinel; pool sizes and timeout (ms) below.
        - redis_sentinel_hosts=${redis_url}
        - redis_master_name=${redis_master_name}
        - redis_master_auth=${redis_pass}
        - redis_database_index=${redis_database_index}
        - redis_pool_max_total=8
        - redis_pool_max_idle=8
        - redis_pool_min_idle=0
        - redis_timeout=2000
        - JOB_SERVICE_URL=http://${cloud_api_host}/jobservice/job
        - MAX_CONCURRENT_OPS_PER_WORKER=500
        # Site-scan batching/timeout tuning.
        # NOTE(review): units (seconds vs. minutes) are not visible here -- see service docs.
        - SITE_SCAN_TASK_QUEUE_METRIC_INTERVAL=10
        - SITE_SCAN_WORKER_METRIC_INTERVAL=5
        - SITE_SCAN_MAX_BATCH_GAP=120
        - SITE_SCAN_MIN_BATCH_GAP=2
        - SITE_SCAN_MAX_BATCH_SIZE=100
        - SITE_SCAN_DEFAULT_DEVICE_OP_TIMEOUT=120
        - SITE_SCAN_RESULTS_TTL=7
        # Clustering and metrics are disabled in this template.
        - CLUSTER=false
        - CLUSTER_LOCKS_QUORUM=2
        - METRICS=false
      labels:
        - "traefik.backend=coper"
        - "traefik.port=26737"
        - "traefik.frontend.rule=PathPrefix:/coper"
    
    # Diagnostics service, routed by Traefik at /diag (port 3424).
    diag:
      image: cloud-diag:s-19.4-0.0.1-43
      environment:
        - JAVA_OPTIONS=-server -Xmx1G -XX:+UseG1GC -Djava.net.preferIPv4Stack=true
        - WEB_PATH_PREFIX=/diag
        # NOTE(review): despite the REDSHIFT_ prefix, these three point at
        # Greenplum (pivotal JDBC URL here, GreenplumDriver below).
        - REDSHIFT_JDBC_URL=jdbc:pivotal:greenplum://${greenplum_host}:${greenplum_port};DatabaseName=${greenplum_db}
        - REDSHIFT_JDBC_USERNAME=${greenplum_username}
        - REDSHIFT_JDBC_PASSWORD=${greenplum_password}
        # Actual Redshift (wifi analytics) connection.
        - WIFI_REDSHIFT_JDBC_URL=jdbc:redshift://${wifi_redshift_host}:${wifi_redshift_port}/${wifi_redshift_db}
        - WIFI_REDSHIFT_JDBC_USERNAME=${wifi_redshift_username}
        - WIFI_REDSHIFT_JDBC_PASSWORD=${wifi_redshift_password}
        - COPER_URL=http://${cloud_api_host}/coper
        - LOG_LAYOUT=json
        - METRICS=false
        - database_primary_driver_class_name=com.pivotal.jdbc.GreenplumDriver
        - WIFI_REDSHIFT_DRIVER_CLASS=com.amazon.redshift.jdbc42.Driver
        - HIKARI_MAX_POOL_SIZE=2
      labels:
        - "traefik.backend=diag"
        - "traefik.port=3424"
        - "traefik.frontend.rule=PathPrefix:/diag"
    
    # Wi-Fi optimization service, routed by Traefik at /wifi-optimization.
    wifi-optimization:
      image: cloud-wifi-optimization:s-19.4-0.0.1-43
      environment:
        - JAVA_OPTIONS=-server -Xmx1G -XX:+UseG1GC -Djava.net.preferIPv4Stack=true
        - WEB_PATH_PREFIX=/wifi-optimization
        - KAFKA_BOOTSTRAP_SERVERS=${kafka_bootstrap_server}:9092
        # Operational Postgres connection.
        - PGSQL_JDBC_URL=jdbc:postgresql://${postgres_ip}:5432/${postgres_db}
        - PGSQL_JDBC_USERNAME=${cloud_postgres_username}
        - PGSQL_JDBC_PASSWORD=${cloud_postgres_password}
        - ACS_URL=http://${acs_host}:8081
        - AUDITLOG_URL=http://${cloud_api_host}/logger/auditlog
        # Primary analytics database is Redshift here (port 5439 hard-coded).
        - database_primary_driver_class_name=com.amazon.redshift.jdbc42.Driver
        - database_primary_database_jdbc_url=jdbc:redshift://${wifi_redshift_host}:5439/${wifi_redshift_db}
        - database_primary_database_username=${wifi_redshift_username}
        - database_primary_database_password=${wifi_redshift_password}
        - database_primary_database_poolsize=2
        # Redis via Sentinel; pool sizes and timeout (ms) below.
        - redis_sentinel_hosts=${redis_url}
        - redis_master_name=${redis_master_name}
        - redis_master_auth=${redis_pass}
        - redis_database_index=${redis_database_index}
        - redis_pool_max_total=8
        - redis_pool_max_idle=8
        - redis_pool_min_idle=0
        - redis_timeout=2000
        - COPER_URL=http://${cloud_api_host}/coper
        - LOG_LAYOUT=json
        - scheduler_mongo_batch_size=500
        - SUBSCRIBER_API_URL=http://${cloud_api_host}/subscribers
        # "xx" is a template placeholder to be filled per deployment.
        - SUBSCRIBER_API_TOKEN=xx
      labels:
        - "traefik.backend=wifi-optimization"
        - "traefik.port=8085"
        - "traefik.frontend.rule=PathPrefix:/wifi-optimization"
    
    # Job scheduling service, routed by Traefik at /jobservice (port 8086).
    job-service:
      image: cloud-job-service:s-19.4-0.0.1-43
      environment:
        - JAVA_OPTIONS=-server -Xmx1G -XX:+UseG1GC -Djava.net.preferIPv4Stack=true
        # Operational Postgres connection.
        - PGSQL_JDBC_URL=jdbc:postgresql://${postgres_ip}:5432/${postgres_db}
        - PGSQL_JDBC_USERNAME=${cloud_postgres_username}
        - PGSQL_JDBC_PASSWORD=${cloud_postgres_password}
        # Callback endpoints for the task types this service dispatches.
        - CSC_TASK_TRIGGER_ADDRESS_SUBSCRIBER_REPORT=http://${cloud_api_host}/subscriber-report/cloud/task/submit
        # NOTE(review): "retriveTaskResult" (sic) must match the endpoint
        # spelling exposed by subscriber-report -- do not correct here alone.
        - CSC_RESULT_RETRIEVE_ADDRESS_SUBSCRIBER_REPORT=http://${cloud_api_host}/subscriber-report/cloud/task/retriveTaskResult
        - CSC_TASK_TRIGGER_AUTO_SITE_SCAN=http://${cloud_api_host}/coper/wifi/neighbor/org-exec
        - WEB_PORT=8086
        - WEB_PATH_PREFIX=/jobservice
      labels:
        - "traefik.backend=job-service"
        - "traefik.port=8086"
        - "traefik.frontend.rule=PathPrefix:/jobservice"
    
    
    # Network performance (speed) test service, Traefik path /net-perf-test.
    net-perf-test:
      image: cloud-net-perf-test:s-19.4-0.0.1-43
      environment:
        - JAVA_OPTIONS=-server -Xmx1G -XX:+UseG1GC -Djava.net.preferIPv4Stack=true
        # Operational Postgres connection.
        - PGSQL_JDBC_URL=jdbc:postgresql://${postgres_ip}:5432/${postgres_db}
        - PGSQL_JDBC_USERNAME=${cloud_postgres_username}
        - PGSQL_JDBC_PASSWORD=${cloud_postgres_password}
        - JOB_SERVICE_URL=http://${cloud_api_host}/jobservice
        - RUN_TEST_NOW_API_URL=http://${cloud_api_host}/net-perf-test/net-perf-test/runTestNow
        - ACS_URL=http://${acs_host}:8081
        # NOTE(review): idle timeout 15 and connection timeout 30000 are
        # probably different units (s vs. ms) -- confirm against service docs.
        - HTTP_IDLE_TIMEOUT=15
        - HTTP_CONNECTION_TIMEOUT=30000
        - HTTP_POOL_SIZE=9
        # NOTE(review): staging URL committed in a template; auth header is a placeholder.
        - MAP_CAF_II_SERVICE_URL=https://stage.rgw.calix.ai/cafii/v1/speedtest/
        - MAP_CAF_II_SERVICE_AUTH_HEADER=xx
      labels:
        - "traefik.backend=net-perf-test"
        - "traefik.port=8087"
        - "traefik.frontend.rule=PathPrefix:/net-perf-test"
    
    
    # Subscriber report generator, routed by Traefik at /subscriber-report.
    subscriber-report:
      image: cloud-subscriber-report:s-19.4-0.0.1-43
      environment:
        - JAVA_OPTIONS=-server -Xmx2G -XX:+UseG1GC -Djava.net.preferIPv4Stack=true
        - task_service_admin_port=7071
        - task_service_web_api_port=8084
        - task_service_web_api_session_timeout=200
        # Callbacks into job-service for job/task status reporting.
        - task_service_job_callback_url=http://${cloud_api_host}/jobservice/job
        - task_service_task_callback_url=http://${cloud_api_host}/jobservice/taskExecLog
        - task_service_http_client_connection_timeout=200
        - task_service_http_client_idle_timeout=200
        - task_service_http_client_pool_size=8
        # Primary analytics database is Redshift (port 5439 hard-coded).
        - database_primary_driver_class_name=com.amazon.redshift.jdbc42.Driver
        - database_primary_database_jdbc_url=jdbc:redshift://${wifi_redshift_host}:5439/${wifi_redshift_db}
        - database_primary_database_username=${wifi_redshift_username}
        - database_primary_database_password=${wifi_redshift_password}
        - database_primary_database_poolsize=2
        # Redis via Sentinel; pool sizes and timeout (ms) below.
        - redis_sentinel_hosts=${redis_url}
        - redis_master_name=${redis_master_name}
        - redis_master_auth=${redis_pass}
        - redis_database_index=${redis_database_index}
        - redis_pool_max_total=8
        - redis_pool_max_idle=8
        - redis_pool_min_idle=0
        - redis_timeout=2000
        - cache_retry_time=3
        - cache_retry_interval=1000
        # Generated report files are written here inside the container.
        - JOB_RESULT_DIR=/support-cloud/report-result
        # FIXME(review): SMTP account and password committed in plain text;
        # these should come from template variables / a secrets store.
        - task_service_mail_smtp_host=eng-smtp.calix.local
        - task_service_mail_smtp_port=25
        - task_service_mail_smtp_auth=false
        - task_service_mail_smtp_account=NoReply-CSC@calix.com
        - task_service_mail_smtp_password=Calix123
      labels:
        - "traefik.backend=subscriber-report"
        - "traefik.port=8084"
        - "traefik.frontend.rule=PathPrefix:/subscriber-report"
    View Code

    2.5.20 csc_setup/refresh_view.sh.tmp

    #!/bin/bash
    # Refresh the device_view materialized view. CONCURRENTLY avoids blocking
    # readers (requires a unique index on the view).
    # Expects connection settings in the environment (or substituted by the
    # template engine): cloud_postgres_password, postgres_ip, postgres_db,
    # cloud_postgres_username.
    set -euo pipefail
    export PGPASSWORD="$cloud_postgres_password"
    # Quote every expansion so hosts/passwords with spaces or globs survive.
    psql -h "$postgres_ip" -d "$postgres_db" -U "$cloud_postgres_username" \
      -c "REFRESH MATERIALIZED VIEW CONCURRENTLY device_view;"
    View Code

    3. Postgres

    3.1 .psqlrc

    3.1.1 check active session

    -- check active session (usage inside psql: :active_session)
    -- \set takes a single quoted string; embedded single quotes must be doubled.
    \set active_session 'select pid,usename,datname,application_name,client_addr,age(clock_timestamp(), query_start),query from pg_stat_activity where pid<>pg_backend_pid() and state=''active'' order by query_start desc;'
    View Code

    3.1.2 all_reslove_session

    -- all_reslove_session: idle sessions excluding trivial statements
    -- (usage inside psql: :session). Embedded single quotes are doubled for \set.
    \set session 'select pid,usename,datname,application_name,client_addr,age(clock_timestamp(), query_start),query from pg_stat_activity where pid<>pg_backend_pid() and state=''idle'' and upper(query) not like ''SET%'' and upper(query) not like ''SHOW%'' and query != ''COMMIT'' order by query_start desc;'
    View Code

    3.1.3 check wait events

    -- check wait events (usage inside psql: :wait_event)
    \set wait_event 'select pid,application_name,client_addr,age(clock_timestamp(),query_start),state,wait_event_type,wait_event from pg_stat_activity where pid<>pg_backend_pid() and wait_event is not null order by wait_event_type;'
    View Code

    3.1.4 table_size

    -- table_size: per-table total/index/toast/heap sizes for public-schema
    -- tables, largest first (usage inside psql: :table_size).
    -- Embedded single quotes are doubled for \set.
    \set table_size 'select table_name,pg_size_pretty(total_bytes) AS total, pg_size_pretty(index_bytes) AS idx , pg_size_pretty(toast_bytes) AS toast , pg_size_pretty(table_bytes) AS relsize from (select *, total_bytes-index_bytes-COALESCE(toast_bytes,0) AS table_bytes FROM ( SELECT c.oid,nspname AS table_schema, relname AS table_name , c.reltuples AS row_estimate , pg_total_relation_size(c.oid) AS total_bytes , pg_indexes_size(c.oid) AS index_bytes , pg_total_relation_size(reltoastrelid) AS toast_bytes FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace WHERE relkind = ''r'' and relname in (select tablename from pg_tables where schemaname=''public'') ) a) a order by total_bytes desc;'
    View Code

    3.1.5 database size

    -- database size: 20 largest databases (usage inside psql: :database_size).
    -- Embedded single quotes are doubled for \set.
    \set database_size 'SELECT d.datname AS Name,pg_catalog.pg_get_userbyid(d.datdba) AS Owner,CASE WHEN pg_catalog.has_database_privilege(d.datname, ''CONNECT'') THEN pg_catalog.pg_size_pretty(pg_catalog.pg_database_size(d.datname))  ELSE ''No Access''  END AS SIZE FROM pg_catalog.pg_database d ORDER BY CASE WHEN pg_catalog.has_database_privilege(d.datname, ''CONNECT'') THEN pg_catalog.pg_database_size(d.datname) ELSE NULL END DESC LIMIT 20;'
    View Code

    3.1.6 redshift running sqls

    -- redshift running sqls (usage inside psql: :redshift_run).
    -- Embedded single quotes are doubled for \set.
    \set redshift_run 'SELECT pid,starttime,duration,trim(user_name) AS user,trim(query) AS querytxt FROM stv_recents WHERE STATUS = ''Running'' order by starttime desc;'
    View Code

    3.2 dump_pg_schema.sh

    #!/bin/bash
    # Dump the schema (-s: no data) of the "cloud" database to schma.sql,
    # excluding backup/scratch tables.
    # FIXME(review): hard-coded password in plain text; prefer ~/.pgpass or
    # an environment variable supplied at run time.
    set -euo pipefail
    export PGPASSWORD='postgres'
    # pg_dump expects one --exclude-table switch per pattern; a single
    # comma-separated value is parsed as one (never-matching) pattern.
    exclude_tables=(
      cloud_subscriber_devices_0227
      calixcalldisposition_backup
      cloud_subscribers_0227
      david_billing
      dv2
      sxacc_devices_backup
      sxaimsubscriber_next_endpoint_id_bak
      sxaimsubscriber_next_endpoint_id_old
      tblsizestats
      'csc_site_scan_results_*'
    )
    exclude_args=()
    for t in "${exclude_tables[@]}"; do
      exclude_args+=("--exclude-table=$t")
    done
    pg_dump -h localhost -d cloud -s "${exclude_args[@]}" -U calixcloud -f schma.sql
    View Code

     

    kubectl describe clusterrole system:kube-scheduler
  • 相关阅读:
    路由网址这是mvc时代系列之三:网络路由与ASP.NET MVC生命周期(上)
    调用代码JCFXBL与WebView整合打造安卓应用服务平台
    服务安装Mongodb应用实战教程:windows下安装与安全策略配置
    方法选择Android从SD卡中选择图片的三种方法及自定义系统选择框标题的方法
    图层照片如何扣头发丝
    对象方法JavaScript脚本语言初解
    数据库方法django中ModelForm学习系列一~save方法
    团队位置《楚汉传奇》聊管理
    包查找*.so对应的rpm包
    现实世界的Windows Azure:就Metanga采访MetraTech公司CEO,Scott Swartz先生
  • 原文地址:https://www.cnblogs.com/tben/p/12721245.html
Copyright © 2011-2022 走看看