zoukankan      html  css  js  c++  java
  • Hadoop平台配置汇总

    Hadoop平台配置汇总

    @(Hadoop)


    Hadoop

    在hadoop-env.sh和yarn-env.sh中export JAVA_HOME以及log和pid的dir即可。

    core-site.xml

    <!-- core-site.xml: core Hadoop settings shared by HDFS and YARN clients. -->
    <configuration>
      <!-- Default filesystem URI; "ns1" is the HA nameservice defined in hdfs-site.xml. -->
      <property>
        <name>fs.defaultFS</name>
        <value>hdfs://ns1</value>
      </property>
      <!-- Base directory for Hadoop temporary files. -->
      <property>
        <name>hadoop.tmp.dir</name>
        <value>/data2/hadoop/tmp</value>
      </property>
      <!-- ZooKeeper ensemble used for automatic NameNode failover (ZKFC). -->
      <property>
        <name>ha.zookeeper.quorum</name>
        <value>zx-hadoop-210-11:2181,zx-hadoop-210-12:2181,zx-hadoop-210-31:2181</value>
      </property>
    </configuration>

    hdfs-site.xml

    
    <!-- hdfs-site.xml: HDFS settings for the HA nameservice "ns1" (two NameNodes,
         QJM-based shared edits, ZKFC automatic failover). -->
    <configuration>
    <!-- Local path for the NameNode namespace image/edits.
         NOTE(review): dfs.name.dir is the deprecated (pre-2.x) name of
         dfs.namenode.name.dir; Hadoop still honors it via the deprecation map. -->
    <property>
    <name>dfs.name.dir</name>  
    <value>/data2/hadoop/hdfs/name</value>  
    </property>
    <!-- Local path where DataNodes store block data (deprecated name of
         dfs.datanode.data.dir). -->
    <property>
    <name>dfs.data.dir</name>  
    <value>/data2/hadoop/hdfs/data</value>  
    </property>
    <!-- Whitelist file of hosts allowed to register as DataNodes. -->
    <property>
    <name>dfs.hosts</name>
    <value>/usr/local/bigdata/hadoop/etc/hadoop/datanode-allow.list</value>
    </property>
    <!-- Blacklist file of hosts to be decommissioned/excluded. -->
    <property>
    <name>dfs.hosts.exclude</name>
    <value>/usr/local/bigdata/hadoop/etc/hadoop/datanode-deny.list</value>
    </property>
    <!-- Logical nameservice ID; must match fs.defaultFS in core-site.xml. -->
    <property>
    <name>dfs.nameservices</name>
    <value>ns1</value>
    </property>
    <!-- The two NameNode IDs that make up nameservice ns1. -->
    <property>
    <name>dfs.ha.namenodes.ns1</name>
    <value>nn1,nn2</value>
    </property>
    <!-- RPC and HTTP endpoints for each NameNode. -->
    <property>
    <name>dfs.namenode.rpc-address.ns1.nn1</name>
    <value>zx-hadoop-210-11:9000</value>
    </property>
    <property>
    <name>dfs.namenode.http-address.ns1.nn1</name>
    <value>zx-hadoop-210-11:50070</value>
    </property>
    <property>
    <name>dfs.namenode.rpc-address.ns1.nn2</name>
    <value>zx-hadoop-210-12:9000</value>
    </property>
    <property>
    <name>dfs.namenode.http-address.ns1.nn2</name>
    <value>zx-hadoop-210-12:50070</value>
    </property>
    <!-- QuorumJournalManager URI: the JournalNode group that stores shared edits. -->
    <property>
    <name>dfs.namenode.shared.edits.dir</name> 
    <value>qjournal://zx-hadoop-210-11:8485;zx-hadoop-210-12:8485;zx-hadoop-210-31:8485/ns1</value>
    </property>
    <!-- Local directory used by each JournalNode to store edit logs. -->
    <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/data2/hadoop/journal</value>
    </property>
    <!-- Enable ZKFC-driven automatic failover between nn1 and nn2. -->
    <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
    </property>
    <!-- Client-side proxy provider that resolves ns1 to the active NameNode. -->
    <property>
    <name>dfs.client.failover.proxy.provider.ns1</name>
    <value>
    org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
    </value>
    </property>
    <!-- Fencing methods tried in order during failover: SSH-kill the old active,
         then fall back to shell(/bin/true) so failover proceeds if SSH fails. -->
    <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
    sshfence
    shell(/bin/true)
    </value>
    </property>
    <!-- Private key used by the sshfence method. -->
    <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/hadoop/.ssh/id_rsa</value>
    </property>
    <!-- SSH connect timeout for fencing, in milliseconds. -->
    <property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
    </property>
    </configuration>

    yarn-site.xml

    <!-- yarn-site.xml: ResourceManager location, shuffle service, and log aggregation. -->
    <configuration>
      <!-- Host running the (single, non-HA) ResourceManager. -->
      <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>zx-hadoop-210-11</value>
      </property>
      <!-- Auxiliary service NodeManagers run so MapReduce shuffle works. -->
      <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
      </property>
      <!-- Aggregate container logs to the remote (HDFS) directory below. -->
      <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
      </property>
      <property>
        <name>yarn.nodemanager.remote-app-log-dir</name>
        <value>/data2/hadoop/logs/yarn</value>
      </property>
      <!-- Keep aggregated logs for 259200 s (3 days); check for expiry hourly. -->
      <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>259200</value>
      </property>
      <property>
        <name>yarn.log-aggregation.retain-check-interval-seconds</name>
        <value>3600</value>
      </property>
    </configuration>

    mapred-site.xml

    <!-- mapred-site.xml: run MapReduce jobs on YARN instead of the classic framework. -->
    <configuration>
      <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
      </property>
    </configuration>

    HBase

    hbase-env.sh

    # JDK used to run HBase daemons.
    export JAVA_HOME="/usr/java/jdk1.7.0_55"
    # Do not let HBase manage ZooKeeper; an external ensemble is used.
    export HBASE_MANAGES_ZK=false
    # JVM options: use the CMS collector.
    export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
    # Locations for daemon PID files and logs.
    export HBASE_PID_DIR="/data2/hadoop/pid"
    export HBASE_LOG_DIR="/data2/hadoop/logs/hbase"

    hbase-site.xml

    <!-- hbase-site.xml: fully-distributed HBase on the HA HDFS nameservice "ns1". -->
    <configuration>
      <!-- HBase data root on HDFS; uses the logical nameservice, not a single host. -->
      <property>
        <name>hbase.rootdir</name>
        <value>hdfs://ns1/hbase</value>
      </property>
      <!-- Run in fully-distributed mode. -->
      <property>
        <name>hbase.cluster.distributed</name>
        <value>true</value>
      </property>
      <!-- External ZooKeeper ensemble (HBASE_MANAGES_ZK=false in hbase-env.sh). -->
      <property>
        <name>hbase.zookeeper.quorum</name>
        <value>zx-hadoop-210-11:2181,zx-hadoop-210-12:2181,zx-hadoop-210-31:2181</value>
      </property>
      <property>
        <name>hbase.master</name>
        <value>zx-hadoop-210-11</value>
      </property>
      <!-- NOTE(review): 6000000 ms is ~100 minutes — far above typical values
           (tens of seconds); verify this is intentional. -->
      <property>
        <name>zookeeper.session.timeout</name>
        <value>6000000</value>
      </property>
    </configuration>

    作者:@小黑

  • 相关阅读:
    关于 No buffer space available (maximum connections reached?): connect 的处理
    Cron 表达式
    Hudson 打包部署到Was上特别慢
    JAVA jar 参数
    CentOS 6 UNEXPECTED INCONSISTENCY RUN fsck MANUALLY
    SSH 连接很慢
    解决libc.so.6: version `GLIBC_2.14' not found问题
    Maven 基本参数
    Shc 应用
    JAVA 关于JNI本地库加载
  • 原文地址:https://www.cnblogs.com/jchubby/p/5449353.html
Copyright © 2011-2022 走看看