  • Hadoop notes - daily operations commands for a Hadoop cluster

    Daily operations commands for a Hadoop cluster
    #1.namenode
    hadoop namenode -format #formats the NameNode metadata; destructive, use with caution
    su hdfs
    hadoop-daemon.sh start namenode
    hadoop-daemon.sh stop namenode
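    #After a restart, confirm which NameNode is active. A minimal check, assuming the NameNode IDs nn1/nn2 from the HA config below:
    hdfs haadmin -getServiceState nn1 #prints "active" or "standby"
    hdfs haadmin -getServiceState nn2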
    #2.journalnode
    hadoop-daemon.sh start journalnode
    hadoop-daemon.sh stop journalnode
    #3.zkfc
    hdfs zkfc -formatZK #formats the ZKFC state in ZooKeeper; destructive, use with caution
    hadoop-daemon.sh start zkfc
    hadoop-daemon.sh stop zkfc
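    #With ZKFC running, failover is automatic; it can also be triggered manually for maintenance. A sketch, assuming nn1 is currently active:
    hdfs haadmin -failover nn1 nn2 #transitions nn1 to standby and nn2 to active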
    #4.datanode
    hadoop-daemon.sh start datanode
    hadoop-daemon.sh stop datanode
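    #After restarting a DataNode, verify it re-registered with the NameNode:
    hdfs dfsadmin -report #lists live/dead DataNodes and per-node capacity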
    #5.nodemanager
    su yarn
    yarn-daemon.sh start nodemanager
    yarn-daemon.sh stop nodemanager
    #6.resourcemanager
    yarn-daemon.sh start resourcemanager
    yarn-daemon.sh stop resourcemanager
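    #To confirm the YARN daemons are back, list the NodeManagers; the RM ID rm1 below is an assumption, adjust to your yarn-site.xml:
    yarn node -list -all #NodeManagers and their states
    yarn rmadmin -getServiceState rm1 #active/standby, only if ResourceManager HA is enabled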
    #7.hive
    hive --service hiveserver2 #HiveServer2, default port 10000
    hive --service metastore #Hive metastore, default port 9083
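    #Smoke-test HiveServer2 once port 10000 is listening; host and user below are placeholders:
    beeline -u jdbc:hive2://localhost:10000 -n hdfs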
    #8.zookeeper
    bin/zkServer.sh start  #default client port 2181
    bin/zkServer.sh stop
    bin/zkServer.sh status
    bin/zkCli.sh #connect to the running ZooKeeper service to inspect znodes; use -server host:port to target a specific server
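    #zkCli.sh can also show the HA state written by ZKFC; a sketch assuming the default /hadoop-ha parent znode:
    bin/zkCli.sh -server localhost:2181
    ls /hadoop-ha #one child per nameservice, e.g. [aaa, bbb]
    get /hadoop-ha/aaa/ActiveStandbyElectorLock #identifies the NameNode currently holding the active lock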
    #HA Federation (example hdfs-site.xml)
    <?xml version="1.0" encoding="UTF-8" standalone="no"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <configuration>
    <property>
    <name>dfs.nameservices</name>
    <value>aaa,bbb</value>
    </property>
    
    <!-- aaa -->
    <property>
    <name>dfs.ha.namenodes.aaa</name>
    <value>nn1,nn2</value>
    </property>
    <property>
    <name>dfs.namenode.rpc-address.aaa.nn1</name>
    <value>1.1.1.1:8020</value>
    </property>
    <property>
    <name>dfs.namenode.rpc-address.aaa.nn2</name>
    <value>1.1.1.2:8020</value>
    </property>
    <property>
    <name>dfs.namenode.http-address.aaa.nn1</name>
    <value>1.1.1.1:50070</value>
    </property>
    <property>
    <name>dfs.namenode.http-address.aaa.nn2</name>
    <value>1.1.1.2:50070</value>
    </property>
    <property>
    <name>dfs.client.failover.proxy.provider.aaa</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <property>
    <name>dfs.namenode.shared.edits.dir.aaa</name>
    <value>qjournal://1.1.1.1:8485;1.1.1.2:8485;1.1.1.3:8485/aaa</value>
    </property>
    
    <!-- bbb -->
    <property>
    <name>dfs.ha.namenodes.bbb</name>
    <value>nn1,nn2</value>
    </property>
    <property>
    <name>dfs.namenode.rpc-address.bbb.nn1</name>
    <value>1.1.1.4:8020</value>
    </property>
    <property>
    <name>dfs.namenode.rpc-address.bbb.nn2</name>
    <value>1.1.1.5:8020</value>
    </property>
    <property>
    <name>dfs.namenode.http-address.bbb.nn1</name>
    <value>1.1.1.4:50070</value>
    </property>
    <property>
    <name>dfs.namenode.http-address.bbb.nn2</name>
    <value>1.1.1.5:50070</value>
    </property>
    <property>
    <name>dfs.namenode.servicerpc-address.bbb.nn1</name>
    <value>1.1.1.4:8020</value>
    </property>
    <property>
    <name>dfs.namenode.servicerpc-address.bbb.nn2</name>
    <value>1.1.1.5:8020</value>
    </property>
    <property>
    <name>dfs.client.failover.proxy.provider.bbb</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <property>
    <name>dfs.namenode.shared.edits.dir.bbb</name>
    <value>qjournal://1.1.1.4:8485;1.1.1.5:8485;1.1.1.6:8485/bbb</value>
    </property>
    
    <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///data/dfs/nn/local</value>
    </property>
    <property>
    <name>dfs.datanode.data.dir</name>
    <value>/data1/dfs/local,/data2/dfs/local</value>
    </property>
    <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/data/dfs/jn</value>
    </property>
    <property>
    <name>dfs.qjournal.start-segment.timeout.ms</name>
    <value>60000</value>
    </property>
    <property>
    <name>dfs.qjournal.prepare-recovery.timeout.ms</name>
    <value>240000</value>
    </property>
    <property>
    <name>dfs.qjournal.accept-recovery.timeout.ms</name>
    <value>240000</value>
    </property>
    <property>
    <name>dfs.qjournal.finalize-segment.timeout.ms</name>
    <value>240000</value>
    </property>
    <property>
    <name>dfs.qjournal.select-input-streams.timeout.ms</name>
    <value>60000</value>
    </property>
    <property>
    <name>dfs.qjournal.get-journal-state.timeout.ms</name>
    <value>240000</value>
    </property>
    <property>
    <name>dfs.qjournal.new-epoch.timeout.ms</name>
    <value>240000</value>
    </property>
    <property>
    <name>dfs.qjournal.write-txns.timeout.ms</name>
    <value>60000</value>
    </property>
    <property>
    <name>dfs.namenode.acls.enabled</name>
    <value>true</value>
    <description>Set to true to enable support for HDFS ACLs (Access Control Lists).</description>
    </property>
    <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
    </property>
    <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/home/hdfs/.ssh/id_rsa</value>
    </property>
    <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
    </property>
    <property>
    <name>dfs.permissions.superusergroup</name>
    <value>hadoop</value>
    </property>
    <property>
    <name>dfs.datanode.max.transfer.threads</name>
    <value>8192</value>
    </property>
    <property>
    <name>dfs.hosts.exclude</name>
    <value>/app/hadoop-conf/exclude.list</value>
    <description> List of nodes to decommission </description>
    </property>
    <property>
    <name>dfs.datanode.fsdataset.volume.choosing.policy</name>
    <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy</value>
    </property>
    <property>
    <name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold</name>
    <value>10737418240</value>
    </property>
    <property>
    <name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction</name>
    <value>0.75</value>
    </property>
    <property>
    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
    <value>1000</value>
    </property>
    <property>
    <name>dfs.client.read.shortcircuit.streams.cache.expiry.ms</name>
    <value>10000</value>
    </property>
    <property>
    <name>dfs.client.read.shortcircuit</name>
    <value>true</value>
    </property>
    <property>
    <name>dfs.domain.socket.path</name>
    <value>/var/run/hadoop-hdfs/dn._PORT</value>
    </property>
    <property>
    <name>dfs.client.read.shortcircuit.skip.checksum</name>
    <value>false</value>
    </property>
    <property>
    <name>dfs.block.size</name>
    <value>536870912</value>
    </property>
    <property>
    <name>dfs.replication</name>
    <value>3</value>
    </property>
    <property>
    <name>dfs.namenode.handler.count</name>
    <value>300</value>
    </property>
    <property>
    <name>dfs.datanode.handler.count</name>
    <value>40</value>
    </property>
    <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
    </property>
    <property>
    <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
    <value>false</value>
    </property>
    
    <property>
    <name>dfs.datanode.du.reserved</name>
    <value>429496729600</value>
    </property>
    <!--hdfs balancer -policy datanode -threshold 5 -include -f host.txt-->
    <!--Maximum number of concurrent block moves a DataNode may perform while balancing; default is 5 -->
    <property>
    <name>dfs.datanode.balance.max.concurrent.moves</name>
    <value>24</value>
    </property>
    <!--Balancer bandwidth of 100MB/s; default is 1MB/s-->
    <property>
    <name>dfs.datanode.balance.bandwidthPerSec</name>
    <value>104857600</value>
    </property>
    <!--Number of mover threads; default is 1000-->
    <property>
    <name>dfs.balancer.moverThreads</name>
    <value>1024</value>
    </property>
    <!--Maximum number of transfer threads on a DataNode (16384 is recommended when running HBase) -->
    <property>
    <name>dfs.datanode.max.transfer.threads</name>
    <value>4096</value>
    </property>
    <!--Maximum total size of data the balancer moves per iteration; default is 10737418240 (=10GB)-->
    <property>
    <name>dfs.balancer.max-size-to-move</name>
    <value>4096</value>
    </property>
    <!--Typical balancer invocation:
    $ hdfs dfsadmin -setBalancerBandwidth 1073741824
    $ nohup hdfs balancer \
    -Ddfs.datanode.balance.max.concurrent.moves=10 \
    -Ddfs.balancer.dispatcherThreads=1024 \
    -Ddfs.datanode.balance.bandwidthPerSec=1073741824 &
    -->
    </configuration>
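    With two federated nameservices, admin commands must be pointed at one of them. A few sketches tying the commands above to this config; the nameservice and NameNode IDs come from the XML, dn-host-01 is a placeholder hostname, and the exclude-list path is the dfs.hosts.exclude value set above:
    hdfs haadmin -ns aaa -getServiceState nn1 #HA state of nn1 in nameservice aaa
    hdfs haadmin -ns bbb -getServiceState nn1
    #decommission a DataNode: add its hostname to the exclude list, then refresh each nameservice
    echo "dn-host-01" >> /app/hadoop-conf/exclude.list
    hdfs dfsadmin -fs hdfs://aaa -refreshNodes
    hdfs dfsadmin -fs hdfs://bbb -refreshNodes
    hdfs dfsadmin -report -decommissioning #watch until the node shows "Decommissioned"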
  • Original article: https://www.cnblogs.com/xinfang520/p/10782969.html