zoukankan      html  css  js  c++  java
  • hadoop3.0.0部署


    配置前先查下JAVA_HOME的位置
    vim /etc/profile
    #set java environment
    JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
    JRE_HOME=$JAVA_HOME/jre
    CLASS_PATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
    PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:/opt/confluent-5.2.2/bin
    HADOOP_HOME=/hongfeng/software/hadoop-3.0.0
    PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

    vim /etc/hosts
    10.10.37.27 udisk-test-1
    10.10.77.8 udisk-test-2
    10.10.24.86 udisk-test-3

    编辑hadoop相关配置文件:
    vim /hongfeng/software/hadoop-3.0.0/etc/hadoop/hadoop-env.sh
    export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
    export HDFS_NAMENODE_USER=root
    export HDFS_DATANODE_USER=root
    export HDFS_SECONDARYNAMENODE_USER=root
    export HDFS_NAMENODE_OPTS="-XX:+UseParallelGC -Xmx4g"

    vim /hongfeng/software/hadoop-3.0.0/etc/hadoop/core-site.xml

    <configuration>
    <property>
    <name>fs.defaultFS</name>
    <value>hdfs://udisk-test-1:8020</value>
    </property>
    <property>
    <name>hadoop.tmp.dir</name>
    <value>/data/hadoop/hdfs</value>
    </property>
    <property>
    <name>fs.trash.interval</name>
    <value>1440</value>
    </property>
    </configuration>

    vim /hongfeng/software/hadoop-3.0.0/etc/hadoop/hdfs-site.xml
    <?xml version="1.0" encoding="UTF-8"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

    <configuration>
    <property>
    <name>dfs.replication</name>
    <value>3</value>
    </property>
    <property>
    <name>dfs.namenode.http-address</name>
    <value>udisk-test-1:50070</value>
    </property>
    <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>udisk-test-3:9868</value>
    </property>
    </configuration>

    vim /hongfeng/software/hadoop-3.0.0/etc/hadoop/workers
    udisk-test-1
    udisk-test-2
    udisk-test-3

    vim /hongfeng/software/hadoop-3.0.0/etc/hadoop/yarn-env.sh
    export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
    export YARN_RESOURCEMANAGER_USER=root
    export HADOOP_SECURE_DN_USER=yarn
    export YARN_NODEMANAGER_USER=root


    vim /hongfeng/software/hadoop-3.0.0/etc/hadoop/yarn-site.xml
    <configuration>
    <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
    </property>
    <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>udisk-test-2</value>
    </property>
    <property>
    <name>yarn.resourcemanager.address</name>
    <value>udisk-test-2:8032</value>
    </property>
    <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>udisk-test-2:8030</value>
    </property>
    <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>udisk-test-2:8031</value>
    </property>
    <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>udisk-test-2:8033</value>
    </property>
    <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>udisk-test-2:8088</value>
    </property>
    <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
    </property>
    </configuration>

    vim /hongfeng/software/hadoop-3.0.0/etc/hadoop/mapred-site.xml
    （注：Hadoop 3.x 已直接提供 mapred-site.xml，无需再从 .template 复制）

    <configuration>
    <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    </property>
    <property>
    <name>mapreduce.jobhistory.address</name>
    <value>udisk-test-2:10020</value>
    </property>
    <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>udisk-test-2:19888</value>
    </property>
    <property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=/hongfeng/software/hadoop-3.0.0</value>
    </property>
    <property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=/hongfeng/software/hadoop-3.0.0</value>
    </property>
    <property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=/hongfeng/software/hadoop-3.0.0</value>
    </property>
    <property>
    <name>yarn.app.mapreduce.am.staging-dir</name>
    <value>/history</value>
    </property>
    <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
    </property>
    <property>
    <name>mapreduce.map.log.level</name>
    <value>ERROR</value>
    </property>
    <property>
    <name>mapreduce.reduce.log.level</name>
    <value>ERROR</value>
    </property>
    </configuration>


    scp /etc/profile udisk-test-2:/etc/profile
    scp /etc/profile udisk-test-3:/etc/profile


    scp -r /hongfeng/software/hadoop-3.0.0/ udisk-test-2:/hongfeng/software/
    scp -r /hongfeng/software/hadoop-3.0.0/ udisk-test-3:/hongfeng/software/


    #格式化
    source /etc/profile #三个节点
    hdfs namenode -format
    [root@Fengfeng-dr-algo1 hadoop]# ll /data/hadoop/hdfs/
    total 0
    drwxr-xr-x. 3 root root 18 Aug 14 07:55 dfs
    [root@Fengfeng-dr-algo1 hadoop]# ll /data/hadoop/hdfs/dfs/name/current/
    total 16
    -rw-r--r--. 1 root root 391 Aug 14 07:55 fsimage_0000000000000000000
    -rw-r--r--. 1 root root 62 Aug 14 07:55 fsimage_0000000000000000000.md5
    -rw-r--r--. 1 root root 2 Aug 14 07:55 seen_txid
    -rw-r--r--. 1 root root 216 Aug 14 07:55 VERSION

    启动集群:
    start-dfs.sh

    ansible all -m shell -a 'jps'

    在udisk-test-2（ResourceManager所在节点）上启动yarn:
    start-yarn.sh

    ansible all -m shell -a 'jps'
    Fengfeng-dr-algo3 | SUCCESS | rc=0 >>
    20978 DataNode
    21444 Jps
    21295 NodeManager

    Fengfeng-dr-algo2 | SUCCESS | rc=0 >>
    24867 ResourceManager
    24356 DataNode
    25480 NodeManager
    25676 Jps

    Fengfeng-dr-algo4 | SUCCESS | rc=0 >>
    24625 NodeManager
    24180 DataNode
    24292 SecondaryNameNode
    24775 Jps

    Fengfeng-dr-algo1 | SUCCESS | rc=0 >>
    24641 NameNode
    25700 Jps
    24789 DataNode
    25500 NodeManager

    hdfs web:
    http://udisk-test-1:50070
    yarn web:
    http://udisk-test-2:8088/cluster

    测试:

    先做一个1.txt文件, copy一段英文, 传到hdfs

    hadoop jar /hongfeng/software/hadoop-3.0.0/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.0.0.jar wordcount /1.txt /1.output

    https://www.cndba.cn/dave/article/3260

  • 相关阅读:
    HTML5(一)初识HTML5
    iOS手机流量抓包rvictl
    mysql 安全模式
    DNS解析
    Git删除文件
    Git创建本地仓库并推送至远程仓库
    【python】字典/dictionary操作
    Gson解析复杂JSON字符串的两种方式
    apk安装提示:Failure [INSTALL_FAILED_DUPLICATE_PERMISSION perm=XXX]
    su、sudo、su
  • 原文地址:https://www.cnblogs.com/hongfeng2019/p/11700408.html
Copyright © 2011-2022 走看看