java环境配置
修改环境变量
export JAVA_HOME=/usr/java/jdk1.7.0_79
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
hadoop环境配置
mkdir /cloud
tar -zxvf hadoop.tar.gz -C /cloud
1.vim /etc/hadoop/hadoop-env.sh
vim /etc/hadoop/yarn-env.sh
修改JAVA_HOME=/usr/java/jdk1.7.0_79
修改环境变量
source /etc/profile
2.配置伪分布式
vim core-site.xml
<configuration>
<!--指定HDFS NameNode的地址-->
<property>
<name>fs.defaultFS</name>
<value>hdfs://localhost:9000</value><!--伪分布式用localhost即可,如需远程访问请改为本机实际IP-->
</property>
<!--指定hadoop运行时产生文件的存放目录-->
<!--tmp若不存在需要手动创建-->
<property>
<name>hadoop.tmp.dir</name>
<value>/cloud/hadoop-2.7.2/tmp</value>
</property>
</configuration>
vim hdfs-site.xml
<!--dfs name data 目录若不存在需要手动创建-->
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/cloud/hadoop-2.7.2/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/cloud/hadoop-2.7.2/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
修改/etc/hosts
加入yourIP hostname(设置主机名,不然yarn的时候会有问题)
ssh免密码登录
ssh-keygen -t rsa
全部回车默认
cd /root/.ssh
cp id_rsa.pub authorized_keys
测试ssh localhost
格式化namenode
hdfs namenode -format
启动集群
cd /cloud/hadoop-2.7.2/sbin
./start-dfs.sh
测试 http://localhost:50070/
3.配置yarn
cp mapred-site.xml.template mapred-site.xml
vim mapred-site.xml
<configuration>
<!--告诉hadoop mr运行在yarn上-->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
vim yarn-site.xml
<configuration>
<!--告诉nodemanager获取数据的方式是shuffle-->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!--指定yarn 老大resourcemanager的地址-->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>tla001</value>
</property>
</configuration>
参考:
http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/SingleCluster.html#Pseudo-Distributed_Operation
http://baike.xsoftlab.net/view/292.html
http://www.aboutyun.com/thread-12798-1-1.html