zoukankan      html  css  js  c++  java
  • CentOS6.4安装Hadoop2.1.0 beta

    1.安装JDK7 rpm到/usr/java/jdk1.7.0_40,并建立软链接/usr/java/default到/usr/java/jdk1.7.0_40

    [root@server-308 ~]# rpm -ivh jdk-7u40-linux-x64.rpm
    Preparing... ########################################### [100%]
    1:jdk ########################################### [100%]
    Unpacking JAR files...
    rt.jar...
    jsse.jar...
    charsets.jar...
    tools.jar...
    localedata.jar...
    jfxrt.jar...

    [root@server-308 ~]# java -version
    java version "1.7.0_40"
    Java(TM) SE Runtime Environment (build 1.7.0_40-b43)
    Java HotSpot(TM) 64-Bit Server VM (build 24.0-b56, mixed mode)

    2.下载hadoop 2.1.0 beta并解压至/opt/hadoop-2.1.0-beta,并建立软链接/opt/hadoop到/opt/hadoop-2.1.0-beta

    [root@server-308 ~]# cd /opt
    [root@server-308 opt]# tar -zxvf /root/hadoop-2.1.0-beta.tar.gz

      [root@server-308 opt]# ln -s hadoop-2.1.0-beta hadoop
      [root@server-308 opt]# ls
      cedar hadoop hadoop-2.1.0-beta rh

    3. 设置环境变量

    [root@server-308 opt]# vim /etc/profile

    在文件末尾添加如下内容:

    export JAVA_HOME=/usr/java/default
    export PATH=${JAVA_HOME}/bin:${PATH}
    
    export HADOOP_PREFIX=/opt/hadoop
    export PATH=$PATH:$HADOOP_PREFIX/bin
    export PATH=$PATH:$HADOOP_PREFIX/sbin
    export HADOOP_MAPRED_HOME=${HADOOP_PREFIX}
    export HADOOP_COMMON_HOME=${HADOOP_PREFIX}
    export HADOOP_HDFS_HOME=${HADOOP_PREFIX}
    export YARN_HOME=${HADOOP_PREFIX}

    使上述修改生效:

    [root@server-308 opt]# source /etc/profile

    4.建立用户和组

    [root@server-308 opt]# groupadd hadoop
    [root@server-308 opt]# useradd -g hadoop yarn
    [root@server-308 opt]# useradd -g hadoop hdfs
    [root@server-308 opt]# useradd -g hadoop mapred

    5.创建相关路径和文件

    [root@server-308 opt]# mkdir -p /var/data/hadoop/hdfs/nn
    [root@server-308 opt]# mkdir -p /var/data/hadoop/hdfs/snn
    [root@server-308 opt]# mkdir -p /var/data/hadoop/hdfs/dn
    [root@server-308 opt]# mkdir -p /opt/hadoop/logs
    [root@server-308 opt]# mkdir -p /var/log/hadoop/yarn
    [root@server-308 opt]# chown hdfs:hadoop /var/data/hadoop/hdfs -R
    [root@server-308 opt]# chown yarn:hadoop /var/log/hadoop/yarn -R
    [root@server-308 opt]# chown yarn:hadoop /opt/hadoop/logs -R
    [root@server-308 opt]# chmod g+w /opt/hadoop/logs 

     6.修改hadoop配置文件

    [root@server-308 ~]# vim /opt/hadoop/etc/hadoop/core-site.xml
    <configuration>
      <property>
        <name>fs.default.name</name>
        <value>hdfs://192.168.32.31:8020</value>
      </property>
      <property>
        <name>hadoop.http.staticuser.user</name>
        <value>hdfs</value>
      </property>
    </configuration>

     7.修改hdfs-site.xml

    [root@server-308 ~]# vim /opt/hadoop/etc/hadoop/hdfs-site.xml
    <configuration>
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/var/data/hadoop/hdfs/nn</value>
      </property>
      <property>
        <name>fs.checkpoint.dir</name>
        <value>file:/var/data/hadoop/hdfs/snn</value>
      </property>
      <property>
        <name>fs.checkpoint.edits.dir</name>
        <value>file:/var/data/hadoop/hdfs/snn</value>
      </property>
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/var/data/hadoop/hdfs/dn</value>
      </property>
      <property>
        <name>dfs.replication</name>
        <value>1</value>
      </property>
      <property>
        <name>dfs.permissions</name>
        <value>false</value>
      </property>
    </configuration>

     8.修改mapred-site.xml

    #cp mapred-site.xml.template mapred-site.xml
    #vi mapred-site.xml
    <configuration>
      <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
      </property>
    </configuration>

     9.修改yarn-site.xml

    <configuration>
    
    <!-- Site specific YARN configuration properties -->
      <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce.shuffle</value>
      </property>
      <property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
      </property>
    </configuration>

    10.格式化集群

    su - hdfs
    cd /opt/hadoop/bin
    ./hdfs namenode -format

    11.启动hdfs

    cd ../sbin
    ./hadoop-daemon.sh start namenode
    ./hadoop-daemon.sh start secondarynamenode
    ./hadoop-daemon.sh start datanode

    12.检查hdfs是否正常工作

    hadoop fs -mkdir /user/shaochen
    hadoop fs -put mr-jobhistory-daemon.sh /user/shaochen/mr-jobhistory-daemon.sh
    hadoop fs -cat /user/shaochen/mr-jobhistory-daemon.sh

    13.启动yarn

    su - yarn
    cd /opt/hadoop/sbin
    ./yarn-daemon.sh start resourcemanager
    ./yarn-daemon.sh start nodemanager

     14.检验yarn是否正常工作

    ./hadoop jar ../share/hadoop/mapreduce/hadoop-mapreduce-examples-2.1.0-beta.jar pi 16 1000
  • 相关阅读:
    重拾web开发JavaScript复习
    Linq GroupBy
    Gotchas 31对目标类型为指涉物为常量的指针类型的类型转换的认识误区
    感谢你遇到的问题
    IDisposable模式的一点理解
    感谢你遇到的问题(2)
    .Net通过OutLook发送邮件,附件的名称太长会显示乱码
    深度学习利器之自动微分(1)
    建议转载的发在文章(Aticles)而不是随笔(Posts)内
    Forum,ForumGroup和my forum的汉译
  • 原文地址:https://www.cnblogs.com/littlesuccess/p/3361497.html
Copyright © 2011-2022 走看看