hadoop+hive+hbase+zookeeper installation

1. Machine details:

    Master    192.168.11.100
    slave     192.168.11.101

    1.1 Run the following on every machine:
    vi /etc/hosts
    192.168.11.100   Master
    192.168.11.101   slave
    vi /etc/sysconfig/network
    HOSTNAME=Master  # use HOSTNAME=slave on the slave machine
    The change takes effect after a reboot.
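    A quick way to confirm that the hostnames resolve on both machines (an optional check, not in the original steps):
    hostname            # should print Master (or slave on the slave machine)
    ping -c 1 Master    # should answer from 192.168.11.100
    ping -c 1 slave     # should answer from 192.168.11.101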
    
    
    
2. Perform the same operations on both servers.
    2.1 Unpack all packages:
    apache-hive-0.14.0-bin.tar.gz
    hadoop-2.5.2.tar.gz
    hbase-0.99.2-bin.tar.gz
    jdk-1.8.tar.gz
    zookeeper-3.4.6.tar.gz
    After extracting everything, move the directories into place:
    mv hadoop-2.5.2 /home/hadoop/
    mv apache-hive-0.14.0-bin /usr/local/
    mv hbase-0.99.2 /usr/local/
    mv zookeeper-3.4.6 /usr/local/
    mv jdk-1.8 /usr/local/
    
    
    2.2 Environment variables
    vi /etc/profile
    ###set java_env
    export JAVA_HOME=/usr/local/jdk-1.8
    export JRE_HOME=/usr/local/jdk-1.8/jre
    export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib:$JRE_HOME/lib
    export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
    ###set hadoop_env
    export HADOOP_HOME=/home/hadoop/hadoop-2.5.2
    export HADOOP_COMMON_HOME=$HADOOP_HOME
    export HADOOP_HDFS_HOME=$HADOOP_HOME
    export HADOOP_MAPRED_HOME=$HADOOP_HOME
    export HADOOP_YARN_HOME=$HADOOP_HOME
    export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
    export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib
    export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
    export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"
    ###set hive_env
    export HIVE_HOME=/usr/local/apache-hive-0.14.0-bin
    export PATH=$PATH:$HIVE_HOME/bin
    ###set zookeeper_env
    ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.6
    export ZOOKEEPER_HOME
    export PATH=$PATH:$ZOOKEEPER_HOME/bin:$ZOOKEEPER_HOME/conf
    ###set hbase_env
    export HBASE_MANAGES_ZK=false 
    export HBASE_HOME=/usr/local/hbase-0.99.2
    Apply the environment variables:
    source /etc/profile
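    A minimal sanity check that the new variables are in effect (assuming the JDK and Hadoop directories were moved into place in step 2.1):
    echo $JAVA_HOME $HADOOP_HOME
    java -version       # should report a 1.8 JDK
    hadoop version      # should report Hadoop 2.5.2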
    
    Modify the Hadoop configuration files:
    cd /home/hadoop/hadoop-2.5.2/etc/hadoop
    vi hadoop-env.sh
    export JAVA_HOME=/usr/local/jdk-1.8
    
    vi core-site.xml
    <configuration>  
    <property>
    <name>fs.default.name</name>
    <value>hdfs://Master:9000</value>
    </property>
    <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/hadoop/hadoop-2.5.2/tmp</value>
    </property>
    </configuration>  
    
    vi hdfs-site.xml
    <configuration>  
    <property>
    <name>dfs.replication</name>
    <value>2</value>
    </property>
    <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/home/hadoop/hadoop-2.5.2/dfs/name</value>
    </property>
    <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/home/hadoop/hadoop-2.5.2/dfs/data</value>
    </property>
    </configuration>  
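    The tmp, name, and data directories referenced above do not exist in a freshly extracted tarball. Pre-creating them on both machines is a small extra step not in the original notes, but it avoids path surprises later:
    mkdir -p /home/hadoop/hadoop-2.5.2/tmp
    mkdir -p /home/hadoop/hadoop-2.5.2/dfs/name
    mkdir -p /home/hadoop/hadoop-2.5.2/dfs/data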
    
    cp mapred-site.xml.template mapred-site.xml   # the stock 2.5.2 tarball only ships the template
    vi mapred-site.xml
    <configuration> 
    <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    </property>
    <property>
    <name>mapreduce.jobhistory.address</name>
    <value>Master:10020</value>
    </property>
    <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>Master:19888</value>
    </property>
    </configuration> 
    
    vi yarn-site.xml
    <configuration> 
    <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
    </property>
    <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
    <name>yarn.resourcemanager.address</name>
    <value>Master:8032</value>
    </property>
    <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>Master:8030</value>
    </property>
    <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>Master:8031</value>
    </property>
    <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>Master:8033</value>
    </property>
    <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>Master:8088</value>
    </property>
    </configuration> 
    
    vi slaves
    slave    # if there are multiple slaves, list one hostname per line
    
    3. Set up passwordless SSH login (as root here):
    ssh-keygen -t rsa    # press Enter at every prompt; do the same on the slave as well
    cd /root/.ssh/
    cat id_rsa.pub >> authorized_keys
    chmod 600 authorized_keys
    scp -r authorized_keys root@192.168.11.101:/root/.ssh/
    ssh root@192.168.11.101    # check whether a password is still required
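    The steps above give passwordless login from Master to slave, which is what start-all.sh needs. If login from slave back to Master is also wanted (an optional addition, not in the original steps), append the slave's key on Master as well:
    # on slave
    ssh-keygen -t rsa
    cat /root/.ssh/id_rsa.pub | ssh root@192.168.11.100 'cat >> /root/.ssh/authorized_keys'
    ssh root@192.168.11.100    # should now log in without a password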
    
    4. Copy the files to the slave:
    scp -r /home/hadoop/hadoop-2.5.2  root@192.168.11.101:/home/hadoop/
    
    5. Start the cluster
    cd /home/hadoop/hadoop-2.5.2
    # format the HDFS filesystem
    ./bin/hdfs namenode -format
    # start Hadoop
    ./sbin/start-all.sh
    
    http://192.168.11.100:50070/
    http://192.168.11.100:8088/
    
    # check the running processes
    Master
    /usr/local/jdk-1.8/bin/jps
    7744 Jps
    3218 ResourceManager
    3078 SecondaryNameNode
    7049 QuorumPeerMain
    2910 NameNode
    
    
    slave
    /usr/local/jdk-1.8/bin/jps
    1644 NodeManager
    2573 QuorumPeerMain
    2734 Jps
    1551 DataNode
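    As an optional smoke test (not part of the original notes), HDFS and YARN can be exercised with a small file upload and the bundled example job; the examples jar path assumes the stock 2.5.2 tarball layout:
    hdfs dfs -mkdir -p /test
    hdfs dfs -put /etc/hosts /test/
    hdfs dfs -ls /test
    # run the bundled pi estimator on YARN
    yarn jar /home/hadoop/hadoop-2.5.2/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.5.2.jar pi 2 10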
    
    6. apache-hive-0.14.0 configuration
    6.1 Install MySQL
    yum -y install mysql-server mysql mysql-devel
    service mysqld start
    mysqladmin -uroot password 'root'
    mysql -uroot -proot
    >create database hive;
    6.2 Modify the configuration file
    vi /usr/local/apache-hive-0.14.0-bin/conf/hive-site.xml
    <configuration>
    <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost:3306/hive?characterEncoding=UTF-8</value>
    </property>
    <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    </property>
    <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
    </property>
    <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>root</value>
    </property>
    </configuration>
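    Hive also needs the MySQL JDBC driver on its classpath before it can reach the metastore database; this step is missing from the notes above, and the connector version shown is only an example:
    # copy the MySQL connector jar (version is illustrative) into Hive's lib directory
    cp mysql-connector-java-5.1.34-bin.jar /usr/local/apache-hive-0.14.0-bin/lib/
    # start the Hive CLI; the metastore tables are created in the hive database on first use
    hive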
    
    
    7. ZooKeeper configuration
    cd /usr/local/zookeeper-3.4.6/conf
    mv zoo_sample.cfg zoo.cfg
    vi zoo.cfg
    dataDir=/tmp/zookeeper
    server.1=Master:2888:3888
    server.2=slave:2888:3888
    clientPort=2181
    
    scp -r /usr/local/zookeeper-3.4.6 root@192.168.11.101:/usr/local/
    mkdir -p /tmp/zookeeper            # the dataDir must exist before writing myid
    echo "1" > /tmp/zookeeper/myid     # on Master
    echo "2" > /tmp/zookeeper/myid     # run this one on the slave
    
    bin/zkServer.sh start   # run on both servers
    bin/zkCli.sh -server Master:2181  # connect a client to the server
    
    # check the ZooKeeper server status
    Master
    [root@Master zookeeper-3.4.6]# bin/zkServer.sh status
    JMX enabled by default
    Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
    Mode: follower
    slave
    [root@slave zookeeper-3.4.6]# bin/zkServer.sh status
    JMX enabled by default
    Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
    Mode: leader
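    An optional end-to-end check through the client shell (assuming both servers report a valid Mode above): create a znode, read it back, then delete it; the same get can also be run against slave:2181 to confirm replication.
    bin/zkCli.sh -server Master:2181
    # inside the client shell:
    create /smoketest hello
    get /smoketest
    delete /smoketest
    quit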
    
    7.1 HBase configuration
    vi /usr/local/hbase-0.99.2/conf/hbase-site.xml
    <configuration>
    <property>
      <name>hbase.rootdir</name>
      <value>hdfs://192.168.11.100:9000/hbase</value>
     </property>
     <property>
       <name>hbase.cluster.distributed</name>
       <value>true</value> 
     </property>
     <property>
       <name>hbase.master</name>
       <value>192.168.11.100:60000</value>
     </property>
     <property>
       <name>hbase.zookeeper.quorum</name>
       <value>Master,slave</value>
     </property>
    </configuration>
    
    vi /usr/local/hbase-0.99.2/conf/regionservers
    Master
    slave
    7.2 Copy to the slave
    scp -r /usr/local/hbase-0.99.2 root@192.168.11.101:/usr/local/
    Start:
    bin/start-hbase.sh  # start on both servers
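    An optional check that HBase is up (not in the original notes): jps should now also list HMaster and/or HRegionServer, and the HBase shell can run a basic status query from /usr/local/hbase-0.99.2.
    /usr/local/jdk-1.8/bin/jps    # expect HMaster / HRegionServer in addition to the earlier processes
    bin/hbase shell
    # inside the shell:
    status
    exit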
Original article: https://www.cnblogs.com/zenghui940/p/4325875.html