一、 Hadoop配置修改
修改core-site.xml文件
<configuration>
  <!-- Logical HA filesystem URI. "bch" is the nameservice declared in
       hdfs-site.xml (dfs.nameservices); a logical nameservice name must NOT
       carry a port — clients resolve the active NameNode through the
       configured failover proxy provider. -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://bch</value>
  </property>
  <!-- ZooKeeper ensemble used by the ZKFailoverController for automatic
       NameNode failover (dfs.ha.automatic-failover.enabled=true). -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>cdh-master:2181,cdh-node1:2181,cdh-node2:2181</value>
  </property>
  <!-- JobTracker RPC address. The value is host:port, not an hdfs:// URI
       (the original "hdfs://172.168.10.251:9001" scheme was wrong).
       NOTE(review): this property conventionally lives in mapred-site.xml;
       kept here to match the original deployment layout. -->
  <property>
    <name>mapred.job.tracker</name>
    <value>172.168.10.251:9001</value>
  </property>
  <!-- Default block replication factor. 1 means no redundancy — suitable
       only for test clusters. (Duplicated in hdfs-site.xml, where it
       conventionally belongs.) -->
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <!-- Sequence-file / stream read-write buffer size in bytes (128 KiB). -->
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
</configuration>
修改hdfs-site.xml
<configuration>
  <!-- ===== NameNode HA with Quorum Journal Manager ===== -->
  <!-- Logical name of the HA filesystem; must match the authority used in
       fs.defaultFS (core-site.xml). -->
  <property>
    <name>dfs.nameservices</name>
    <value>bch</value>
  </property>
  <!-- The two NameNode ids participating in the "bch" nameservice. -->
  <property>
    <name>dfs.ha.namenodes.bch</name>
    <value>nn1,nn2</value>
  </property>
  <!-- Per-NameNode RPC endpoints. -->
  <property>
    <name>dfs.namenode.rpc-address.bch.nn1</name>
    <value>cdh-master:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.bch.nn2</name>
    <value>cdh-node1:8020</value>
  </property>
  <!-- Per-NameNode web UI endpoints. -->
  <property>
    <name>dfs.namenode.http-address.bch.nn1</name>
    <value>cdh-master:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.bch.nn2</name>
    <value>cdh-node1:50070</value>
  </property>
  <!-- Shared edit log: JournalNode quorum (semicolon-separated), with the
       nameservice name as the journal id. -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://cdh-master:8485;cdh-node1:8485;cdh-node2:8485/bch</value>
  </property>
  <!-- Local disk path where each JournalNode stores its edits. -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/smp/hadoop-cdh4</value>
  </property>
  <!-- Client-side class that picks the active NameNode for "bch". -->
  <property>
    <name>dfs.client.failover.proxy.provider.bch</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <!-- Fencing during failover. QJM already prevents split-brain writes, so a
       no-op shell fence is acceptable; sshfence is the stricter alternative. -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <!--value>sshfence</value-->
    <value>shell(/bin/true)</value>
  </property>
  <!-- namenode ha end -->
  <!-- namenode ha auto failover: requires ZKFC daemons and
       ha.zookeeper.quorum (core-site.xml). -->
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <!-- ===== General HDFS settings ===== -->
  <!-- Default block size in bytes (256 MiB). -->
  <property>
    <name>dfs.blocksize</name>
    <value>268435456</value>
  </property>
  <!-- Removed from active config: a plain (non-HA) dfs.namenode.http-address
       conflicts with the per-nn dfs.namenode.http-address.bch.* entries above.
  <property>
    <name>dfs.namenode.http-address</name>
    <value>cdh-master:50070</value>
  </property>
  -->
  <!-- Removed from active config: with HA the standby NameNode performs
       checkpointing, so no SecondaryNameNode must be configured or started.
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>cdh-node1:50090</value>
  </property>
  -->
  <!-- Group whose members are HDFS superusers. -->
  <property>
    <name>dfs.permissions.superusergroup</name>
    <value>hadoop</value>
  </property>
  <!-- NameNode metadata directories (current name for deprecated dfs.name.dir). -->
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/data/1/dfs/nn,/data/2/dfs/nn</value>
  </property>
  <!-- DataNode block storage directories (current name for deprecated dfs.data.dir). -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/data/1/dfs/dn,/data/2/dfs/dn,/data/3/dfs/dn</value>
  </property>
  <!-- Block replication factor; 1 is only suitable for test clusters. -->
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <!-- Max concurrent transfer threads per DataNode (current name for the
       deprecated, misspelled dfs.datanode.max.xcievers). -->
  <property>
    <name>dfs.datanode.max.transfer.threads</name>
    <value>4096</value>
  </property>
</configuration>
分发hadoop到各个机器