  • Outline: installing Docker + Hadoop from scratch inside an enterprise (offline) environment

    Download the Apache project tarballs from http://mirror.bit.edu.cn/apache/

    Download the CentOS 7 installation ISO (about 7 GB)

    Install CentOS 7

    Copy the Packages and repodata directories from the installation disc to the hard disk

    Set up an httpd service and point DocumentRoot in /etc/httpd/conf/httpd.conf at that directory

    service httpd start   .

      If SELinux is enabled, use the semanage / chcon / restorecon commands to keep the same security context as /var/www; check it with ls -Z
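
    A minimal sketch of that SELinux step, assuming the Packages/repodata were copied under /var/www/html/yum (the path is illustrative):

    semanage fcontext -a -t httpd_sys_content_t "/var/www/html/yum(/.*)?"
    restorecon -Rv /var/www/html/yum
    ls -Z /var/www/html/yum    # verify the context matches /var/www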

    Once the web site is up,

    write the repo file under /etc/yum.repos.d on each machine (the yum.repo shown further below),

    then test yum:  yum clean all ; yum makecache

    New RPMs can also be dropped into the Packages directory, but the repo index database must be rebuilt with createrepo
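
    For example (hedged; same illustrative /var/www/html/yum path as above):

    createrepo --update /var/www/html/yum    # rebuild the repodata index after adding RPMs
    yum clean all && yum makecache           # refresh the client-side cache afterwards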

    Download Docker 1.9

    Install it with rpm

    Test: service docker start
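
    A sketch of the offline install, assuming the Docker 1.9 RPM (docker-engine) and its dependencies were downloaded beforehand; the exact filename may differ:

    rpm -ivh docker-engine-*.rpm
    systemctl enable docker
    service docker start
    docker info    # quick sanity check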

    Download csphere and study its install .sh (install it on a CentOS VM with internet access, then use docker save / docker load to bring the images into the enterprise network); it is mainly a convenient way to manage Docker
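
    The save/load round trip might look like this (the csphere image name is illustrative):

    # on the internet-connected VM
    docker save -o csphere.tar csphere/csphere:latest
    # copy csphere.tar into the intranet, then on the local Docker host
    docker load -i csphere.tar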

    Build the CentOS Docker base image with the script from https://raw.githubusercontent.com/docker/docker/master/contrib/mkimage-yum.sh

    Name it centos
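
    A hedged invocation sketch; by default the script reads /etc/yum.conf, which should already resolve to the local repo via /etc/yum.repos.d:

    chmod +x mkimage-yum.sh
    sudo ./mkimage-yum.sh centos           # imports a minimal centos:<release> image
    docker images | grep centos
    # if needed, retag so the FROM line in the Dockerfile below resolves:
    # docker tag centos:7.2.1511 centos7:7.2.1511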

    Based on centos, build an image with JDK 8 and sshd; name it jdk8:centos7

    FROM centos7:7.2.1511

    ADD jdk-8u65-linux-x64.gz /usr/sbin
    ENV JAVA_HOME /usr/sbin/jdk1.8.0_65
    ENV CLASSPATH /usr/sbin/jdk1.8.0_65/lib/dt.jar:/usr/sbin/jdk1.8.0_65/lib/tools.jar

    RUN echo "JAVA_HOME=$JAVA_HOME;export JAVA_HOME;" >>/etc/profile
    RUN echo "CLASSPATH=$CLASSPATH:$JAVA_HOME;export CLASSPATH;" >>/etc/profile
    RUN echo "PATH=$PATH:$JAVA_HOME/bin;export PATH;" >>/etc/profile
    RUN echo "PATH=$PATH:$JAVA_HOME/bin;export PATH;" >>/etc/bashrc
    RUN rm -f /etc/yum.repos.d/Cent*
    ADD yum.repo /etc/yum.repos.d
    RUN systemctl enable sshd.service
    RUN /usr/lib/systemd/systemd --system &
    RUN yum -y install which openssl wget net-tools openssh-clients openssh-server
    RUN ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key -N ""
    RUN ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ""
    RUN ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ""
    RUN ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ""
    RUN /usr/sbin/sshd
    RUN echo root | passwd root --stdin
    RUN yum makecache && yum clean all
    RUN ssh-keygen -t rsa -f ~/.ssh/id_rsa -N ""; cat ~/.ssh/id_rsa.pub >>~/.ssh/authorized_keys
    RUN echo "StrictHostKeyChecking no" >>~/.ssh/config

    ENTRYPOINT /usr/sbin/sshd;/bin/bash
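
    With jdk-8u65-linux-x64.gz and the yum.repo file (shown below) placed next to this Dockerfile, the image would be built roughly like this:

    docker build -t jdk8:centos7 .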
    
     
     
     
     
    yum.repo (added into /etc/yum.repos.d by the ADD line above)

    [local]
    name=local
    baseurl=http://XXX.XXX/yum
    enabled=1
    gpgcheck=0

    Based on jdk8:centos7, build hadoop2.6

    FROM jdk8:centos7

    ADD hadoop-2.6.2.tar.gz /home/

    RUN ln -s /home/hadoop-2.6.2/ /home/hadoop && cd /home/hadoop

    WORKDIR /home/hadoop
    EXPOSE 22 50070
    COPY etc /home/hadoop/etc/hadoop

    RUN echo "export PATH=$PATH:$JAVA_HOME/bin:/home/hadoop/sbin:/home/hadoop/bin;" >>/etc/profile
    RUN echo "export PATH=$PATH:$JAVA_HOME/bin:/home/hadoop/sbin:/home/hadoop/bin;" >>/etc/bashrc
    RUN systemctl enable sshd.service
    RUN /usr/lib/systemd/systemd --system &

    COPY hadoop-config.sh /home/hadoop/libexec
    ENTRYPOINT /usr/sbin/sshd;/bin/bash
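
    The zoo Dockerfile further down starts with FROM hadoop, so this image is assumed to be tagged hadoop, e.g.:

    docker build -t hadoop .    # run where this Dockerfile, hadoop-2.6.2.tar.gz, etc/ and hadoop-config.sh live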
     

    Test whether a single-node Hadoop starts (if "java not found" appears, manually edit hadoop-config.sh under libexec; see the sketch below)

    start-dfs.sh start-yarn.sh
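
    The manual fix is to hard-code JAVA_HOME near the top of /home/hadoop/libexec/hadoop-config.sh, using the JDK path from the Dockerfile above:

    export JAVA_HOME=/usr/sbin/jdk1.8.0_65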

    Dockerfile for the zoo image

    FROM hadoop
    ADD zookeeper-3.4.7.tar.gz /home/
    EXPOSE 16020 16202 16010 60000 60010 22 7373 7946 9000 50010 50020 50070 50075 50090 50475 8030 8031 8032 8033 8040 8042 8060 8088 50060 2888 2181 3888 8480 10020 19888
    RUN echo "export ZOOKEEPER_HOME=/home/zookeeper-3.4.7" >>/etc/profile
    RUN echo "export ZOOKEEPER_HOME=/home/zookeeper-3.4.7" >>/etc/bashrc

    RUN echo "export PATH=$PATH:$JAVA_HOME/bin:/home/hadoop/sbin:/home/hadoop/bin:/home/zookeeper-3.4.7/bin:/home/zookeeper-3.4.7/conf:/home/hbase-1.0.2/bin" >>/etc/profile
    RUN echo "export PATH=$PATH:$JAVA_HOME/bin:/home/hadoop/sbin:/home/hadoop/bin:/home/zookeeper-3.4.7/bin:/home/zookeeper-3.4.7/conf:/home/hbase-1.0.2/bin" >>/etc/bashrc
    VOLUME /data/hadoop
    COPY zoo/zoo.cfg /home/zookeeper-3.4.7/conf/zoo.cfg
    COPY ha_etc/core-site.xml /home/hadoop/etc/hadoop/core-site.xml
    COPY ha_etc/hdfs-site.xml /home/hadoop/etc/hadoop/hdfs-site.xml
    COPY ha_etc/mapred-site.xml /home/hadoop/etc/hadoop/mapred-site.xml
    COPY ha_etc/yarn-site.xml /home/hadoop/etc/hadoop/yarn-site.xml
    COPY ha_etc/hosts.allow /data/hadoop/tmp/hosts.allow

    COPY ha_etc/slaves_datanode.txt /home/hadoop/etc/hadoop/slaves
    RUN mkdir /home/zookeeper-3.4.7/data
    ENV HA_ID rm1

    ADD hbase-1.0.2-bin.tar.gz /home/
    RUN sed -i "s|# export JAVA_HOME=/usr/java/jdk1.6.0/|export JAVA_HOME=/usr/sbin/jdk1.8.0_65|g" /home/hbase-1.0.2/conf/hbase-env.sh
    RUN sed -i "s/# export HBASE_MANAGES_ZK=true/export HBASE_MANAGES_ZK=false/g" /home/hbase-1.0.2/conf/hbase-env.sh
    RUN echo "export HBASE_MANAGES_ZK=false" >>/etc/profile
    RUN echo "export HBASE_MANAGES_ZK=false" >>/etc/bashrc
    ENTRYPOINT /usr/sbin/sshd;/bin/bash
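
    The zoo/zoo.cfg that gets copied in is not shown in these notes; a minimal sketch matching the three-node quorum used in core-site.xml (dataDir matches the directory the Dockerfile creates, and server ids 1..3 match the myid files written by the startup script below) might look like:

    tickTime=2000
    initLimit=10
    syncLimit=5
    dataDir=/home/zookeeper-3.4.7/data
    clientPort=2181
    server.1=master.hantongchao.com:2888:3888
    server.2=nn1.hantongchao.com:2888:3888
    server.3=nn2.hantongchao.com:2888:3888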
    

      

     Docker script that starts the ZooKeeper/Hadoop containers; it creates four fixed containers (master, master1, nn1, nn2) plus slave containers (argument 1 sets the node count N, default 3; a usage sketch follows the script)

    #!/bin/bash
    # update /etc/hosts with container IP / hostname mappings
    inner_host=127.0.0.1
    updateHost()
    {
        # read the existing entry
        inner_host=`cat /etc/hosts | grep ${in_url} | awk '{print $1}'`
        if [ "${inner_host}" = "${in_ip}" ];then
            echo "${inner_host} ${in_url} ok"
        else
            if [ -n "${inner_host}" ];then
                echo " change is ok "
            else
                inner_ip_map="${in_ip} ${in_url}"
                echo ${inner_ip_map} >> /etc/hosts
                if [ $? = 0 ]; then
                    echo "${inner_ip_map} to hosts success host is `cat /etc/hosts`"
                fi
                echo "should append"
            fi
        fi
    }
    # run N slave containers
    N=$1

    # the default node number is 3
    if [ $# = 0 ]
    then
        N=3
    fi

    docker build --rm -t zoo .

    # delete old master container and start new master container
    sudo docker rm -f master_hadoop &> /dev/null
    echo "start master container..."
    sudo docker run -d -t --dns 127.0.0.1 -v /etc/hosts:/etc/hosts -p 8088:8088 -P -v /data/hadoop/master:/data/hadoop --name master_hadoop -h master.hantongchao.com -w /root zoo &> /dev/null

    ip0=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" master_hadoop)

    serverid=0;
    ((serverid++))
    # zoo myid
    echo $serverid > myid
    sudo docker cp myid master_hadoop:/home/zookeeper-3.4.7/data/myid

    # delete old nn1 container and start new nn1 container
    sudo docker rm -f nn1_hadoop &> /dev/null
    echo "start nn1 container..."
    mkdir /data/hadoop/nn1 &> /dev/null
    sudo docker run -d -t --dns 127.0.0.1 -p 50070:50070 -p 60000:60000 -p 16010:16010 -v /etc/hosts:/etc/hosts -e "HA_ID=rm1" -P -v /data/hadoop/nn1:/data/hadoop --name nn1_hadoop -h nn1.hantongchao.com -w /root zoo &> /dev/null
    ip1=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" nn1_hadoop)
    ((serverid++))
    echo $serverid > myid
    sudo docker cp myid nn1_hadoop:/home/zookeeper-3.4.7/data/myid
    #yarn slaves

    # delete old nn2 container and start new nn2 container
    sudo docker rm -f nn2_hadoop &> /dev/null
    echo "start nn2 container..."
    mkdir /data/hadoop/nn2 &> /dev/null
    sudo docker run -d -t --dns 127.0.0.1 -p 50071:50070 -v /etc/hosts:/etc/hosts -p 16020:16010 -P -v /data/hadoop/nn2:/data/hadoop --name nn2_hadoop -h nn2.hantongchao.com -w /root zoo &> /dev/null
    ip2=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" nn2_hadoop)

    ((serverid++))

    echo $serverid > myid
    sudo docker cp myid nn2_hadoop:/home/zookeeper-3.4.7/data/myid
    # get the IP address of master container
    FIRST_IP=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" master_hadoop)

    # delete old master1 container and start new master1 container
    sudo docker rm -f master1_hadoop &> /dev/null
    echo "start master1 container..."
    sudo docker run -d -t --dns 127.0.0.1 -v /etc/hosts:/etc/hosts -e "HA_ID=rm2" -P -v /data/hadoop/master1:/data/hadoop --name master1_hadoop -h master1.hantongchao.com -w /root zoo &> /dev/null

    ip4=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" master1_hadoop)

    ((serverid++))
    # zoo myid
    echo $serverid > myid
    sudo docker cp myid master1_hadoop:/home/zookeeper-3.4.7/data/myid

    # delete old slave containers and start new slave containers
    i=1
    while [ $i -lt $N ]
    do
        sudo docker rm -f slave_hadoop$i &> /dev/null
        echo "start slave_hadoop$i container..."
        mkdir /data/hadoop/$i &> /dev/null
        sudo docker run -d -t --dns 127.0.0.1 -v /etc/hosts:/etc/hosts -P -v /data/hadoop/$i:/data/hadoop --name slave_hadoop$i -h slave$i.hantongchao.com -e JOIN_IP=$FIRST_IP zoo &> /dev/null
        in_ip=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" slave_hadoop$i)
        in_url=slave$i.hantongchao.com
        ((serverid++))
        echo $serverid > myid
        sudo docker cp myid slave_hadoop$i:/home/zookeeper-3.4.7/data/myid
        sudo docker cp ha_etc/slaves_datanode.txt slave_hadoop$i:/home/hadoop/etc/hadoop/slaves
        updateHost
        ((i++))
    done
    echo $in_ip
    in_ip=$ip0
    in_url="master.hantongchao.com"
    updateHost
    #in_url="mycluster"
    #updateHost
    in_ip=$ip1
    in_url="nn1.hantongchao.com"
    updateHost
    in_ip=$ip2
    in_url="nn2.hantongchao.com"
    updateHost

    in_ip=$ip4
    in_url="master1.hantongchao.com"
    updateHost

    #sudo docker cp ha_etc/slaves_nodemanager.txt master_hadoop:/home/hadoop/etc/hadoop/slaves
    #sudo docker cp ha_etc/slaves_nodemanager.txt master1_hadoop:/home/hadoop/etc/hadoop/slaves

    sudo docker cp ha_etc/slaves_datanode.txt master_hadoop:/home/hadoop/etc/hadoop/slaves
    sudo docker cp ha_etc/slaves_datanode.txt master1_hadoop:/home/hadoop/etc/hadoop/slaves

    sudo docker cp ha_etc/slaves_datanode.txt nn1_hadoop:/home/hadoop/etc/hadoop/slaves
    sudo docker cp ha_etc/slaves_datanode.txt nn2_hadoop:/home/hadoop/etc/hadoop/slaves

    # start ZooKeeper in the three quorum containers
    sudo docker exec -it master_hadoop /home/zookeeper-3.4.7/bin/zkServer.sh start
    sudo docker exec -it nn1_hadoop /home/zookeeper-3.4.7/bin/zkServer.sh start
    sudo docker exec -it nn2_hadoop /home/zookeeper-3.4.7/bin/zkServer.sh start

    sudo docker exec -it master_hadoop /home/zookeeper-3.4.7/bin/zkServer.sh status

    echo "journalnode"
    sudo docker exec -it master_hadoop /home/hadoop/sbin/hadoop-daemon.sh start journalnode
    sudo docker exec -it nn1_hadoop /home/hadoop/sbin/hadoop-daemon.sh start journalnode
    sudo docker exec -it nn2_hadoop /home/hadoop/sbin/hadoop-daemon.sh start journalnode

    sudo docker exec -it nn1_hadoop bash -c "/home/hadoop/bin/hdfs namenode -format -clusterid mycluster"
    #sudo docker exec -it nn1_hadoop scp -r /data/hadoop/tmp/dfs/namedir nn2.hantongchao.com:/data/hadoop/tmp/dfs/
    echo namenode -format
    #read what
    sudo docker exec -it nn1_hadoop /home/hadoop/sbin/hadoop-daemon.sh start namenode
    #sudo docker exec -it nn1_hadoop /home/hadoop/sbin/hadoop-daemon.sh start secondarynamenode

    #echo nn1 start namenode secondarynamenode
    #read what

    sudo docker exec -it nn2_hadoop /home/hadoop/bin/hdfs namenode -bootstrapStandby
    sudo docker exec -it nn2_hadoop /home/hadoop/sbin/hadoop-daemon.sh start namenode

    sudo docker exec -it nn1_hadoop /home/hadoop/bin/hdfs zkfc -formatZK
    sudo docker exec -it nn1_hadoop /home/hadoop/sbin/hadoop-daemon.sh start zkfc
    sudo docker exec -it nn2_hadoop /home/hadoop/sbin/hadoop-daemon.sh start zkfc

    sudo docker exec -it nn1_hadoop /home/hadoop/bin/hdfs haadmin -getServiceState nn1
    sudo docker exec -it nn2_hadoop /home/hadoop/bin/hdfs haadmin -getServiceState nn2

    sudo docker exec -it master_hadoop bash -c ' /usr/bin/sed -i "s/{HA_ID}/rm1/g" /home/hadoop/etc/hadoop/yarn-site.xml '
    sudo docker exec -it master1_hadoop bash -c ' /usr/bin/sed -i "s/{HA_ID}/rm2/g" /home/hadoop/etc/hadoop/yarn-site.xml '

    #start-yarn
    sudo docker exec -it master_hadoop /home/hadoop/sbin/yarn-daemon.sh start resourcemanager
    sudo docker exec -it master1_hadoop /home/hadoop/sbin/yarn-daemon.sh start resourcemanager
    sleep 2
    sudo docker exec -it master_hadoop /home/hadoop/sbin/yarn-daemon.sh start nodemanager
    sudo docker exec -it master1_hadoop /home/hadoop/sbin/yarn-daemon.sh start nodemanager
    sudo docker exec -it nn1_hadoop /home/hadoop/sbin/yarn-daemon.sh start nodemanager
    sudo docker exec -it nn2_hadoop /home/hadoop/sbin/yarn-daemon.sh start nodemanager
    sleep 2

    sudo docker exec -it master_hadoop /home/hadoop/sbin/hadoop-daemon.sh start datanode
    sudo docker exec -it master1_hadoop /home/hadoop/sbin/hadoop-daemon.sh start datanode
    sudo docker exec -it nn1_hadoop /home/hadoop/sbin/hadoop-daemon.sh start datanode
    sudo docker exec -it nn2_hadoop /home/hadoop/sbin/hadoop-daemon.sh start datanode

    sudo docker exec -it master_hadoop /home/hadoop/sbin/yarn-daemon.sh start proxyserver
    sudo docker exec -it master_hadoop /home/hadoop/sbin/mr-jobhistory-daemon.sh start historyserver
    echo "nn1_hadoop jps "
    docker exec -it nn1_hadoop /usr/sbin/jdk1.8.0_65/bin/jps
    echo "nn2_hadoop jps "
    docker exec -it nn2_hadoop /usr/sbin/jdk1.8.0_65/bin/jps
    echo "master_hadoop jps "
    docker exec -it master_hadoop /usr/sbin/jdk1.8.0_65/bin/jps
    echo "master1_hadoop jps "
    docker exec -it master1_hadoop /usr/sbin/jdk1.8.0_65/bin/jps

    i=1
    echo $N
    while [ $i -lt $N ]
    do
        sudo docker cp nn1_hadoop:/home/hadoop/etc/hadoop/slaves tmp_slaves_datanode.txt
        echo -e "slave$i.hantongchao.com" >>tmp_slaves_datanode.txt
        sudo docker cp tmp_slaves_datanode.txt nn1_hadoop:/home/hadoop/etc/hadoop/slaves
        sudo docker cp tmp_slaves_datanode.txt nn2_hadoop:/home/hadoop/etc/hadoop/slaves
        sudo docker cp tmp_slaves_datanode.txt master_hadoop:/home/hadoop/etc/hadoop/slaves
        sudo docker cp tmp_slaves_datanode.txt master1_hadoop:/home/hadoop/etc/hadoop/slaves

        sudo docker cp tmp_slaves_datanode.txt nn1_hadoop:/home/hbase-1.0.2/conf/regionservers
        sudo docker cp tmp_slaves_datanode.txt nn2_hadoop:/home/hbase-1.0.2/conf/regionservers

        sudo docker exec -it slave_hadoop$i /home/hadoop/sbin/yarn-daemon.sh start nodemanager
        sudo docker exec -it slave_hadoop$i /home/hadoop/sbin/hadoop-daemon.sh start datanode
        echo "slave_hadoop$i jps "
        docker exec -it slave_hadoop$i /usr/sbin/jdk1.8.0_65/bin/jps
        ((i++))
        echo $i
    done

    sudo docker exec -it nn1_hadoop ssh nn2.hantongchao.com ls
    sudo docker exec -it nn1_hadoop ssh master1.hantongchao.com ls
    sudo docker exec -it nn1_hadoop ssh master.hantongchao.com ls

    sudo docker exec -it nn2_hadoop ssh nn1.hantongchao.com ls
    sudo docker exec -it nn2_hadoop ssh master1.hantongchao.com ls
    sudo docker exec -it nn2_hadoop ssh master.hantongchao.com ls

    sudo docker exec -it nn1_hadoop bash
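
    For reference, a hedged usage example: assuming the script above is saved as run_cluster.sh next to the zoo Dockerfile and the ha_etc/ and zoo/ directories (the file name is illustrative), N=4 starts the four fixed containers plus three slaves:

    chmod +x run_cluster.sh
    sudo ./run_cluster.sh 4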

    core-site.xml

    <?xml version="1.0" encoding="UTF-8"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <!--
      Licensed under the Apache License, Version 2.0 (the "License");
      you may not use this file except in compliance with the License.
      You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      See the License for the specific language governing permissions and
      limitations under the License. See accompanying LICENSE file.
    -->

    <!-- Put site-specific property overrides in this file. -->

    <configuration>
        <property>
            <name>fs.default.name</name>
            <value>hdfs://mycluster</value>
        </property>
        <property>
            <name>fs.defaultFS</name>
            <value>hdfs://mycluster</value>
        </property>
        <property>
            <name>hadoop.tmp.dir</name>
            <value>/data/hadoop/tmp</value>
        </property>
        <property>
            <name>dfs.datanode.data.dir</name>
            <value>/data/hadoop/tmp/dfs/data</value>
        </property>
        <property>
            <name>dfs.journalnode.edits.dir</name>
            <value>/data/hadoop/tmp/dfs/journal</value>
        </property>
        <property>
            <name>ha.zookeeper.quorum</name>
            <value>nn1.hantongchao.com:2181,nn2.hantongchao.com:2181,master.hantongchao.com:2181</value>
        </property>
        <property>
            <name>hadoop.proxyuser.spark.hosts</name>
            <value>*</value>
        </property>
        <property>
            <name>hadoop.proxyuser.spark.groups</name>
            <value>*</value>
        </property>
    </configuration>

      hdfs-site.xml

    <?xml version="1.0" encoding="UTF-8"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <!--
      Licensed under the Apache License, Version 2.0 (the "License");
      you may not use this file except in compliance with the License.
      You may obtain a copy of the License at
    
        http://www.apache.org/licenses/LICENSE-2.0
    
      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      See the License for the specific language governing permissions and
      limitations under the License. See accompanying LICENSE file.
    -->
    
    <!-- Put site-specific property overrides in this file. -->
    
    <configuration>
    <property>
            <name>dfs.replication</name>
            <value>3</value>
        </property>
        <property>
            <name>dfs.namenode.name.dir</name>
            <value>/data/hadoop/tmp/dfs/name</value>
        </property>
     
        <!--
        <property>
            <name>dfs.hosts</name>
            <value>/data/hadoop/tmp/hosts.allow</value>
        </property>
        -->
        <property>
            <name>dfs.datanode.data.dir</name>
            <value>/data/hadoop/tmp/dfs/data</value>
        </property>
         <property>
            <name>dfs.name.dir</name>
            <value>/data/hadoop/tmp/dfs/namedir</value>
        </property>
        
          <property>
            <name>dfs.data.dir</name>
            <value>/data/hadoop/tmp/dfs/hdsfdata</value>
        </property>
        <property>
    			  <name>dfs.nameservices</name>
    			  <value>mycluster</value>
        </property>
        
    		 <property>
    		  <name>dfs.ha.namenodes.mycluster</name>
    		  <value>nn1,nn2</value>
    		 </property>
    			<property>
    			  <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    			  <value>nn1.hantongchao.com:9000</value>
    			</property>
    			<property>
    			  <name>dfs.namenode.http-address.mycluster.nn1</name>
    			  <value>nn1.hantongchao.com:50070</value>
    			</property>
    			
    			<property>
    			  <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    			  <value>nn2.hantongchao.com:9000</value>
    			</property>
    			<property>
    			  <name>dfs.namenode.http-address.mycluster.nn2</name>
    			  <value>nn2.hantongchao.com:50070</value>
    			</property>
    	 	
    			<property>
    			  <name>dfs.namenode.shared.edits.dir</name>
    			  <value>qjournal://nn1.hantongchao.com:8485;nn2.hantongchao.com:8485;master.hantongchao.com:8485/mycluster</value>
    			</property>
    			<property>
    			  <name>dfs.journalnode.edits.dir</name>
    			  <value>/data/hadoop/tmp/dfs/journal</value>
    			</property>
    			
    			
            <!-- Enable automatic NameNode failover -->
            <property>
                    <name>dfs.ha.automatic-failover.enabled</name>
                    <value>true</value>
            </property>
        <!-- Failover proxy provider implementation -->
            <property>
                    <name>dfs.client.failover.proxy.provider.mycluster</name>
                    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
            </property>
        <!-- Fencing methods; multiple methods are separated by newlines, one per line -->
            <property>
                    <name>dfs.ha.fencing.methods</name>
                    <value>shell(/bin/true)</value>
            </property>
        <!-- The sshfence method requires passwordless SSH -->
            <property>
                    <name>dfs.ha.fencing.ssh.private-key-files</name>
                    <value>/root/.ssh/id_rsa</value>
            </property>
        <!-- Connect timeout for the sshfence method -->
            <property>
                    <name>dfs.ha.fencing.ssh.connect-timeout</name>
                    <value>30000</value>
            </property>
    </configuration>
    

      mapred-site.xml

    <?xml version="1.0"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <!--
      Licensed under the Apache License, Version 2.0 (the "License");
      you may not use this file except in compliance with the License.
      You may obtain a copy of the License at
    
        http://www.apache.org/licenses/LICENSE-2.0
    
      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      See the License for the specific language governing permissions and
      limitations under the License. See accompanying LICENSE file.
    -->
    
    <!-- Put site-specific property overrides in this file. -->
    
    <configuration>
    
    <property>
    <name>mapreduce.map.memory.mb</name>
    <value>2046</value>
    </property>
    
    <property>
    <name>mapreduce.reduce.memory.mb</name>
    <value>2046</value>
    </property>
    
    <property>
    <name>mapred.child.java.opts</name>
    <value>-Xmx1024m</value>
    </property>
    <property>
    <name>mapred.reduce.child.java.opts</name>
    <value>-Xmx1024m</value>
    </property>
    <property>
    <name>mapreduce.jobhistory.address</name>
    <value>master.hantongchao.com:10020</value>
    </property>
    
    <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>master.hantongchao.com:19888</value>
    </property>
    
    <property>
    <name>mapreduce.jobhistory.intermediate-done-dir</name>
    <value>/data/hadoop/tmp/mr_history</value>
    </property>
    
    <property>
    <name>mapreduce.jobhistory.done-dir</name>
    <value>/data/hadoop/tmp/mr_history</value>
    </property>
    </configuration>
                        
    

      yarn-site.xml

    <?xml version="1.0"?>
    <!--
      Licensed under the Apache License, Version 2.0 (the "License");
      you may not use this file except in compliance with the License.
      You may obtain a copy of the License at
    
        http://www.apache.org/licenses/LICENSE-2.0
    
      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      See the License for the specific language governing permissions and
      limitations under the License. See accompanying LICENSE file.
    -->
    <configuration>
    
    <!-- Site specific YARN configuration properties -->
    <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    </property>
    
    <property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>2024</value>
    </property>
    <property>
    
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>8096</value>
    </property>
    
    <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
    </property>
    <property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>2.1</value>
    </property>
        <!-- Enable ResourceManager HA -->
            <property>
                    <name>yarn.resourcemanager.ha.enabled</name>
                    <value>true</value>
            </property>
        <!-- ResourceManager cluster id -->
            <property>
                    <name>yarn.resourcemanager.cluster-id</name>
                    <value>rm-cluster</value>
            </property>
        <!-- Logical names of the ResourceManagers -->
            <property>
                    <name>yarn.resourcemanager.ha.rm-ids</name>
                    <value>rm1,rm2</value>
            </property>
        <!-- Hostname of each ResourceManager -->
            <property>
                    <name>yarn.resourcemanager.hostname.rm1</name>
                    <value>master.hantongchao.com</value>
            </property>
            <property>
                    <name>yarn.resourcemanager.hostname.rm2</name>
                    <value>master1.hantongchao.com</value>
            </property>
            
            <property>
                        <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
                        <value>master.hantongchao.com:8031</value>
                    </property>
                     <property>
                        <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
                        <value>master1.hantongchao.com:8031</value>
                    </property>
            <property>
                    <name>yarn.resourcemanager.ha.id</name>
                    <value>{HA_ID}</value>
            </property>       
            
            
            <property>
                    <name>yarn.resourcemanager.recovery.enabled</name>
                    <value>true</value>
            </property>
             
            <property>
                    <name>yarn.resourcemanager.store.class</name>
                    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
            </property>
        <!-- ZooKeeper quorum address -->
            <property>
                    <name>yarn.resourcemanager.zk-address</name>
                    <value>nn1.hantongchao.com:2181,nn2.hantongchao.com:2181,master.hantongchao.com:2181</value>
            </property>
            <property>
                    <name>yarn.nodemanager.aux-services</name>
                    <value>mapreduce_shuffle</value>
            </property>
    <property>
                    <name>yarn.log-aggregation-enable</name>
                    <value>true</value>
     </property>
    </configuration>

    slaves (slaves_datanode.txt)

    master.hantongchao.com
    master1.hantongchao.com
    nn1.hantongchao.com
    nn2.hantongchao.com