Three virtual machines running CentOS 6.5:

192.168.59.130  m1  (master)
192.168.59.131  s1  (slave)
192.168.59.132  s2  (slave)
Change the hostname
[root@m1 hadoop]# cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=m1
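The change in /etc/sysconfig/network only takes effect after a reboot. On CentOS 6 the running hostname can be switched immediately; for example on the master (repeat on s1 and s2 with their own names):

hostname m1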
Update the host mappings
[root@m1 hadoop]# cat /etc/hosts
127.0.0.1       localhost localhost.localdomain localhost4 localhost4.localdomain4
::1             localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.59.130  m1
192.168.59.131  s1
192.168.59.132  s2
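All three nodes need the same mappings. Assuming root SSH access (by password at this point), the file can simply be pushed out from m1:

scp /etc/hosts root@s1:/etc/hosts
scp /etc/hosts root@s2:/etc/hosts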
Passwordless SSH login (note: every machine must be able to SSH into every other machine, including itself)
# run on each of the three machines
ssh-keygen -t rsa
ssh-copy-id -i ~/.ssh/id_rsa.pub m1
ssh-copy-id -i ~/.ssh/id_rsa.pub s1
ssh-copy-id -i ~/.ssh/id_rsa.pub s2
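A quick sanity check of the full mesh is a loop like this, run on each of the three machines; every hop should print the remote hostname without prompting for a password:

for h in m1 s1 s2; do ssh $h hostname; done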
Install the JDK
http://www.cnblogs.com/xiaojf/p/6568426.html
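The rest of this walkthrough assumes the JDK ends up at /usr/local/soft/jdk, which is the path the configuration below uses. Hadoop 2.7.3 needs Java 7 or later; a quick check:

java -version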
Install Hadoop 2.7.3
Extract and rename
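Assuming the hadoop-2.7.3 tarball was downloaded to /usr/local/soft, the step is just:

cd /usr/local/soft
tar -zxvf hadoop-2.7.3.tar.gz
mv hadoop-2.7.3 hadoop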
[root@m1 soft]# ll
total 28
drwxr-xr-x.  8 root root 4096 Aug 17  2016 hadoop
drwxr-xr-x.  2 root root 4096 Mar 27 06:31 jar
drwxr-xr-x.  8 uucp  143 4096 Dec 12 16:50 jdk
drwxr-xr-x.  7 root root 4096 Mar 22 05:46 kafka
drwxrwxr-x.  6 1001 1001 4096 Mar  4  2016 scala-2.11.8
drwxr-xr-x.  4 root root 4096 Mar 21 06:56 tmp
drwxr-xr-x. 10 1001 1001 4096 Aug 23  2016 zookeeper-3.4.9
Create directories for the temporary files, the NameNode metadata, and the DataNode data
mkdir -p /usr/local/soft/tmp/hadoop/tmp
mkdir -p /usr/local/soft/tmp/hadoop/dfs/name
mkdir -p /usr/local/soft/tmp/hadoop/dfs/data
Edit the configuration files
[root@m1 soft]# cd /usr/local/soft/hadoop/etc/hadoop/
[root@m1 hadoop]# ll
total 152
-rw-r--r--. 1 root root  4436 Aug 17  2016 capacity-scheduler.xml
-rw-r--r--. 1 root root  1335 Aug 17  2016 configuration.xsl
-rw-r--r--. 1 root root   318 Aug 17  2016 container-executor.cfg
-rw-r--r--. 1 root root   774 Aug 17  2016 core-site.xml
-rw-r--r--. 1 root root  3589 Aug 17  2016 hadoop-env.cmd
-rw-r--r--. 1 root root  4224 Aug 17  2016 hadoop-env.sh
-rw-r--r--. 1 root root  2598 Aug 17  2016 hadoop-metrics2.properties
-rw-r--r--. 1 root root  2490 Aug 17  2016 hadoop-metrics.properties
-rw-r--r--. 1 root root  9683 Aug 17  2016 hadoop-policy.xml
-rw-r--r--. 1 root root   775 Aug 17  2016 hdfs-site.xml
-rw-r--r--. 1 root root  1449 Aug 17  2016 httpfs-env.sh
-rw-r--r--. 1 root root  1657 Aug 17  2016 httpfs-log4j.properties
-rw-r--r--. 1 root root    21 Aug 17  2016 httpfs-signature.secret
-rw-r--r--. 1 root root   620 Aug 17  2016 httpfs-site.xml
-rw-r--r--. 1 root root  3518 Aug 17  2016 kms-acls.xml
-rw-r--r--. 1 root root  1527 Aug 17  2016 kms-env.sh
-rw-r--r--. 1 root root  1631 Aug 17  2016 kms-log4j.properties
-rw-r--r--. 1 root root  5511 Aug 17  2016 kms-site.xml
-rw-r--r--. 1 root root 11237 Aug 17  2016 log4j.properties
-rw-r--r--. 1 root root   931 Aug 17  2016 mapred-env.cmd
-rw-r--r--. 1 root root  1383 Aug 17  2016 mapred-env.sh
-rw-r--r--. 1 root root  4113 Aug 17  2016 mapred-queues.xml.template
-rw-r--r--. 1 root root   758 Aug 17  2016 mapred-site.xml.template
-rw-r--r--. 1 root root    10 Aug 17  2016 slaves
-rw-r--r--. 1 root root  2316 Aug 17  2016 ssl-client.xml.example
-rw-r--r--. 1 root root  2268 Aug 17  2016 ssl-server.xml.example
-rw-r--r--. 1 root root  2191 Aug 17  2016 yarn-env.cmd
-rw-r--r--. 1 root root  4567 Aug 17  2016 yarn-env.sh
-rw-r--r--. 1 root root   690 Aug 17  2016 yarn-site.xml
yarn-env.sh
[root@m1 hadoop]# vi yarn-env.sh
# (Apache license header omitted)
# The java implementation to use.
export JAVA_HOME=/usr/local/soft/jdk
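hadoop-env.sh sets JAVA_HOME from the environment by default; hard-coding it there as well avoids "JAVA_HOME is not set" errors when the daemons are started over SSH on the slaves:

# in hadoop-env.sh
export JAVA_HOME=/usr/local/soft/jdk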
slaves
[root@m1 hadoop]# vi slaves
s1
s2
core-site.xml
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://m1:9000</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/local/soft/tmp/hadoop/tmp</value>
        <description>A base for other temporary directories.</description>
    </property>
</configuration>
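Once the environment variables below are in place, the effective address can be double-checked with hdfs getconf:

hdfs getconf -confKey fs.defaultFS     # should print hdfs://m1:9000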
hdfs-site.xml
<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>m1:9001</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/local/soft/tmp/hadoop/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/local/soft/tmp/hadoop/dfs/data</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
</configuration>
mapred-site.xml
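The directory listing above contains only mapred-site.xml.template, so create the real file from the template first:

cp mapred-site.xml.template mapred-site.xml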
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>m1:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>m1:19888</value>
    </property>
</configuration>
yarn-site.xml
<configuration>
    <!-- Site specific YARN configuration properties -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>m1:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>m1:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>m1:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>m1:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>m1:8088</value>
    </property>
</configuration>
Set the Hadoop environment variables
export HADOOP_HOME=/usr/local/soft/hadoop
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
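Assuming these two lines go into /etc/profile (or ~/.bash_profile) on every node, reload and verify:

source /etc/profile
hadoop version     # should report 2.7.3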
Distribute the code to the slaves
[root@m1 soft]# scp -r hadoop root@s2:/usr/local/soft/
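Only the copy to s2 is shown; s1 needs the same tree. The tmp/dfs directories created earlier sit outside the hadoop directory and are not included in the scp, so recreate them on both slaves (the slaves run only DataNodes, so the data directory is the one that matters there):

scp -r hadoop root@s1:/usr/local/soft/
ssh s1 "mkdir -p /usr/local/soft/tmp/hadoop/tmp /usr/local/soft/tmp/hadoop/dfs/data"
ssh s2 "mkdir -p /usr/local/soft/tmp/hadoop/tmp /usr/local/soft/tmp/hadoop/dfs/data"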
Format the NameNode
[root@m1 soft]# hdfs namenode -format
17/03/27 07:50:12 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = m1/192.168.59.130
STARTUP_MSG:   args = [-format]
STARTUP_MSG:   version = 2.7.3

The run should end by reporting that the storage directory /usr/local/soft/tmp/hadoop/dfs/name has been successfully formatted. Format only once: reformatting generates a new cluster ID, and DataNodes holding data from the old one will refuse to start.
Start the cluster
[root@m1 soft]# start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [m1]
m1: starting namenode, logging to /usr/local/soft/hadoop/logs/hadoop-root-namenode-m1.out
s1: starting datanode, logging to /usr/local/soft/hadoop/logs/hadoop-root-datanode-s1.out
s2: starting datanode, logging to /usr/local/soft/hadoop/logs/hadoop-root-datanode-s2.out
Starting secondary namenodes [master]
master: ssh: Could not resolve hostname master: Name or service not known
starting yarn daemons
starting resourcemanager, logging to /usr/local/soft/hadoop/logs/yarn-root-resourcemanager-m1.out
s1: starting nodemanager, logging to /usr/local/soft/hadoop/logs/yarn-root-nodemanager-s1.out
s2: starting nodemanager, logging to /usr/local/soft/hadoop/logs/yarn-root-nodemanager-s2.out

The "Could not resolve hostname master" error means the secondary NameNode address still pointed at a host named master when this run was captured; with dfs.namenode.secondary.http-address set to m1:9001 as above (and the config distributed to all nodes), the secondary NameNode starts on m1 instead.
Verify
[root@m1 soft]# hadoop dfs -ls /
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

[root@m1 soft]# hadoop dfs -mkdir /xiaojf
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

[root@m1 soft]# hadoop dfs -ls /
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

Found 1 items
drwxr-xr-x   - root supergroup          0 2017-03-27 07:52 /xiaojf
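As the warnings say, hdfs dfs is the current form of these commands. jps and the web UIs give a fuller picture; 50070 is the stock NameNode UI port in Hadoop 2.x, and 8088 was set in yarn-site.xml above:

hdfs dfs -ls /     # same listing, without the deprecation warning
jps                # m1: NameNode, SecondaryNameNode, ResourceManager
                   # s1/s2: DataNode, NodeManager

NameNode UI:        http://m1:50070
ResourceManager UI: http://m1:8088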
Done.