  • Configuring Kerberos for HBase

    #Kerberos configuration
    Install the Kerberos packages:
    yum install krb5-libs krb5-server krb5-workstation

    vim /etc/krb5.conf
    [logging]
    default = FILE:/var/log/krb5libs.log
    kdc = FILE:/var/log/krb5kdc.log
    admin_server = FILE:/var/log/kadmind.log

    [libdefaults]
    default_realm = EXAMPLE.COM
    dns_lookup_realm = false
    dns_lookup_kdc = false
    ticket_lifetime = 24h
    renew_lifetime = 7d
    forwardable = true
    udp_preference_limit = 1

    [realms]
    EXAMPLE.COM = {
    kdc = dontdelete-master2
    admin_server = dontdelete-master2
    }

    [domain_realm]
    .example.com = EXAMPLE.COM
    example.com = EXAMPLE.COM


    vim /var/kerberos/krb5kdc/kdc.conf
    [kdcdefaults]
    kdc_ports = 88
    kdc_tcp_ports = 88

    [realms]
    EXAMPLE.COM = {
    #master_key_type = aes256-cts
    acl_file = /var/kerberos/krb5kdc/kadm5.acl
    dict_file = /usr/share/dict/words
    admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
    supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
    }


    vim /var/kerberos/krb5kdc/kadm5.acl
    */admin@EXAMPLE.COM *

    Initialize a Kerberos database:
    kdb5_util create -s
    When "Loading random data" appears, open another terminal and run an I/O-heavy command such as cat /dev/sda > /dev/urandom to speed up entropy collection.

    kadmin.local -q "addprinc admin/admin"

    service krb5kdc start
    service kadmin start
    chkconfig krb5kdc on
    chkconfig kadmin on
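
    The KDC should now be usable; a quick sanity check (using the admin/admin principal created above and the password chosen for it):

    #obtain a ticket for the admin principal and inspect the ticket cache
    kinit admin/admin
    klist
    #list all principals known to the KDC (run locally on the KDC host)
    kadmin.local -q "listprincs"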

    #Add principals and export keytab files (the addprinc/xst commands below are run inside kadmin.local)
    addprinc -pw root root/dontdelete-master2
    addprinc -pw root root/dontdelete-master1
    addprinc -pw root root/dontdelete-core2
    addprinc -pw root root/dontdelete-core1

    addprinc -pw root zookeeper/dontdelete-master2
    addprinc -pw root zookeeper/dontdelete-master1
    addprinc -pw root zookeeper/dontdelete-core2
    addprinc -pw root zookeeper/dontdelete-core1

    addprinc -pw root HTTP/dontdelete-master2
    addprinc -pw root HTTP/dontdelete-master1
    addprinc -pw root HTTP/dontdelete-core2
    addprinc -pw root HTTP/dontdelete-core1

    xst -k zookeeper.keytab zookeeper/dontdelete-master2
    xst -k zookeeper.keytab zookeeper/dontdelete-master1
    xst -k zookeeper.keytab zookeeper/dontdelete-core2
    xst -k zookeeper.keytab zookeeper/dontdelete-core1

    xst -k root.keytab root/dontdelete-master2
    xst -k root.keytab root/dontdelete-master1
    xst -k root.keytab root/dontdelete-core2
    xst -k root.keytab root/dontdelete-core1
    xst -k root.keytab HTTP/dontdelete-master2
    xst -k root.keytab HTTP/dontdelete-master1
    xst -k root.keytab HTTP/dontdelete-core2
    xst -k root.keytab HTTP/dontdelete-core1
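
    The keytabs are written to the current working directory. A rough sketch of verifying and distributing them (hostnames and the /root path match the configuration used later in this post; adjust as needed):

    #show the principals contained in each keytab
    klist -kt zookeeper.keytab
    klist -kt root.keytab
    #copy the keytabs to every node; the configs below expect them under /root
    for h in dontdelete-master2 dontdelete-master1 dontdelete-core2 dontdelete-core1; do
        scp zookeeper.keytab root.keytab root@$h:/root/
    done
    #verify that a keytab actually works, e.g. on dontdelete-master2
    kinit -kt /root/root.keytab root/dontdelete-master2
    klist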

    #ZooKeeper configuration
    vim zoo.cfg
    tickTime=2000
    initLimit=10
    syncLimit=5
    dataDir=/data/zk/data
    dataLogDir=/data/zk/logs
    clientPort=2181
    server.1=dontdelete-master2:2888:3888
    server.2=dontdelete-master1:2888:3888
    server.3=dontdelete-core2:2888:3888
    server.4=dontdelete-core1:2888:3888
    authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
    jaasLoginRenew=3600000
    kerberos.removeHostFromPrincipal=true
    kerberos.removeRealmFromPrincipal=true

    vim java.env
    export JVMFLAGS="-Djava.security.auth.login.config=$ZOOKEEPER_HOME/conf/jaas.conf"
    #export CLIENT_JVMFLAGS="-Djava.security.auth.login.config=$ZOOKEEPER_HOME/conf/client.conf"

    vim jaas.conf
    Server{
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    keyTab="/root/zookeeper.keytab"
    storeKey=true
    useTicketCache=false
    principal="zookeeper/dontdelete-master2@EXAMPLE.COM";
    };

    vim client.conf
    Client{
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    keyTab="/root/zookeeper.keytab"
    storeKey=true
    useTicketCache=false
    principal="zookeeper/dontdelete-master2@EXAMPLE.COM";
    };

    Distribute all the configuration files to every node (adjust the principal in jaas.conf to each node's own hostname) and start ZooKeeper:
    $ZOOKEEPER_HOME/bin/zkServer.sh start
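
    To confirm that SASL/Kerberos is actually in effect, one option (a sketch using the client.conf prepared above, mirroring the commented-out CLIENT_JVMFLAGS line in java.env) is to run the CLI client with the client JAAS configuration and watch the server log for a successful SASL login:

    #zkCli.sh picks up CLIENT_JVMFLAGS via zkEnv.sh
    export CLIENT_JVMFLAGS="-Djava.security.auth.login.config=$ZOOKEEPER_HOME/conf/client.conf"
    $ZOOKEEPER_HOME/bin/zkCli.sh -server dontdelete-master2:2181
    #in the zkCli prompt, "ls /" should succeed; the server log should record a
    #SASL authentication for zookeeper/dontdelete-master2@EXAMPLE.COM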


    #Install jsvc
    wget http://mirrors.tuna.tsinghua.edu.cn/apache//commons/daemon/binaries/commons-daemon-1.1.0-bin.tar.gz
    wget http://mirrors.tuna.tsinghua.edu.cn/apache//commons/daemon/source/commons-daemon-1.1.0-src.tar.gz
    tar zxvf commons-daemon-1.1.0-bin.tar.gz
    tar zxvf commons-daemon-1.1.0-src.tar.gz
    cd commons-daemon-1.1.0-src
    cd src/native/unix/
    ./configure
    make
    cp jsvc /root/src/hadoop-2.6.5/libexec/
    cd ../../../..
    cd commons-daemon-1.1.0
    cp commons-daemon-1.1.0.jar /root/src/hadoop-2.6.5/share/hadoop/hdfs/lib/
    scp jsvc root@192.168.0.33:/root/src/hadoop-2.6.5/libexec/
    scp commons-daemon-1.1.0.jar root@192.168.0.33:/root/src/hadoop-2.6.5/share/hadoop/hdfs/lib/
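
    A quick check that the freshly built binary runs on this platform (the path matches the libexec directory that JSVC_HOME points to in hadoop-env.sh below):

    #jsvc should print its usage text rather than a "cannot execute" error
    /root/src/hadoop-2.6.5/libexec/jsvc -help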


    #Hadoop configuration
    vim core-site.xml
    <?xml version="1.0" encoding="UTF-8"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <!--
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License. See accompanying LICENSE file.
    -->

    <!-- Put site-specific property overrides in this file. -->

    <configuration>
    <property>
    <name>fs.defaultFS</name>
    <value>hdfs://ns1</value>
    </property>
    <property>
    <name>hadoop.tmp.dir</name>
    <value>/data/hadoop/tmp</value>
    </property>
    <property>
    <name>fs.trash.interval</name>
    <value>420</value>
    </property>
    <property>
    <name>ha.zookeeper.quorum</name>
    <value>dontdelete-master2:2181,dontdelete-master1:2181,dontdelete-core2:2181,dontdelete-core1:2181</value>
    </property>

    <!--lzo -->
    <property>
    <name>io.compression.codecs</name>
    <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
    </property>
    <property>
    <name>io.compression.codec.lzo.class</name>
    <value>com.hadoop.compression.lzo.LzoCodec</value>
    </property>

    <!--hadoop security -->
    <property>
    <name>hadoop.security.authentication</name>
    <value>kerberos</value>
    </property>
    <property>
    <name>hadoop.security.authorization</name>
    <value>true</value>
    </property>

    </configuration>


    vim hdfs-site.xml
    <?xml version="1.0" encoding="UTF-8"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <!--
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License. See accompanying LICENSE file.
    -->

    <!-- Put site-specific property overrides in this file. -->

    <configuration>
    <!-- nameservice id for the HA NameNode pair -->
    <property>
    <name>dfs.nameservices</name>
    <value>ns1</value>
    </property>
    <!-- NameNodes managed by nameservice ns1: nn1 and nn2 -->
    <property>
    <name>dfs.ha.namenodes.ns1</name>
    <value>nn1,nn2</value>
    </property>
    <!-- RPC addresses of the two NameNodes -->
    <property>
    <name>dfs.namenode.rpc-address.ns1.nn1</name>
    <value>dontdelete-master2:8020</value>
    </property>
    <property>
    <name>dfs.namenode.rpc-address.ns1.nn2</name>
    <value>dontdelete-master1:8020</value>
    </property>
    <!-- web UI addresses of the two NameNodes (port 50070) -->
    <property>
    <name>dfs.namenode.http-address.ns1.nn1</name>
    <value>dontdelete-master2:50070</value>
    </property>
    <property>
    <name>dfs.namenode.http-address.ns1.nn2</name>
    <value>dontdelete-master1:50070</value>
    </property>
    <!-- JournalNode hosts -->
    <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://dontdelete-master2:8485;dontdelete-master1:8485;dontdelete-core2:8485;dontdelete-core1:8485/ns1</value>
    </property>
    <!-- client failover proxy provider -->
    <property>
    <name>dfs.client.failover.proxy.provider.ns1</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <!-- fencing methods used between the two NameNodes -->
    <property>
    <name>dfs.ha.fencing.methods</name>
    <value>
    sshfence
    shell(/bin/true)
    </value>
    </property>
    <property>
    <name>dfs.ha.fencing.ssh.connect-timeout</name>
    <value>30000</value>
    </property>
    <!-- path to this host's SSH private key -->
    <!-- sshfence requires passwordless SSH between the two NameNode hosts; set it up beforehand or fencing will fail -->
    <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
    </property>
    <!-- local directory where the JournalNode stores its edit logs -->
    <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/data/hadoop/journal</value>
    </property>
    <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
    </property>
    <property>
    <name>dfs.replication</name>
    <value>2</value>
    </property>
    <property>
    <name>dfs.namenode.name.dir</name>
    <value>/data/hadoop/name</value>
    </property>
    <property>
    <name>dfs.datanode.data.dir</name>
    <value>/data/hadoop/data</value>
    </property>

    <!-- NameNode security config -->
    <property>
    <name>dfs.block.access.token.enable</name>
    <value>true</value>
    </property>
    <property>
    <name>dfs.namenode.keytab.file</name>
    <value>/root/root.keytab</value>
    </property>
    <property>
    <name>dfs.namenode.kerberos.principal</name>
    <value>root/_HOST@EXAMPLE.COM</value>
    </property>
    <property>
    <name>dfs.namenode.kerberos.https.principal</name>
    <value>HTTP/_HOST@EXAMPLE.COM</value>
    </property>

    <!--DataNode security config-->
    <property>
    <name>dfs.datanode.keytab.file</name>
    <value>/root/root.keytab</value>
    </property>
    <!-- <property>
    <name>dfs.datanode.data.dir.perm</name>
    <value>700</value>
    </property> -->
    <property>
    <name>dfs.datanode.kerberos.principal</name>
    <value>root/_HOST@EXAMPLE.COM</value>
    </property>
    <property>
    <name>dfs.datanode.kerberos.https.principal</name>
    <value>HTTP/_HOST@EXAMPLE.COM</value>
    </property>
    <property>
    <name>dfs.datanode.address</name>
    <value>0.0.0.0:1004</value>
    </property>
    <property>
    <name>dfs.datanode.http.address</name>
    <value>0.0.0.0:1006</value>
    </property>

    <!--web security config-->
    <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
    </property>
    <property>
    <name>dfs.web.authentication.kerberos.principal</name>
    <value>HTTP/_HOST@EXAMPLE.COM</value>
    </property>
    <property>
    <name>dfs.web.authentication.kerberos.keytab</name>
    <value>/root/root.keytab</value>
    </property>

    <!--journalnode security config-->
    <property>
    <name>dfs.journalnode.keytab.file</name>
    <value>/root/root.keytab</value>
    </property>
    <property>
    <name>dfs.journalnode.kerberos.principal</name>
    <value>root/_HOST@EXAMPLE.COM</value>
    </property>
    <property>
    <name>dfs.journalnode.kerberos.internal.spnego.principal</name>
    <value>HTTP/_HOST@EXAMPLE.COM</value>
    </property>


    </configuration>


    vim hadoop-env.sh
    # Licensed to the Apache Software Foundation (ASF) under one
    # or more contributor license agreements. See the NOTICE file
    # distributed with this work for additional information
    # regarding copyright ownership. The ASF licenses this file
    # to you under the Apache License, Version 2.0 (the
    # "License"); you may not use this file except in compliance
    # with the License. You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.

    # Set Hadoop-specific environment variables here.

    # The only required environment variable is JAVA_HOME. All others are
    # optional. When running a distributed configuration it is best to
    # set JAVA_HOME in this file, so that it is correctly defined on
    # remote nodes.

    # The java implementation to use.
    #export JAVA_HOME=${JAVA_HOME}
    export JAVA_HOME=/usr/java/jdk1.8.0_171-amd64
    # The jsvc implementation to use. Jsvc is required to run secure datanodes
    # that bind to privileged ports to provide authentication of data transfer
    # protocol. Jsvc is not required if SASL is configured for authentication of
    # data transfer protocol using non-privileged ports.
    #export JSVC_HOME=${JSVC_HOME}
    export JSVC_HOME=/root/src/hadoop-2.6.5/libexec

    export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}

    # Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
    for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
    if [ "$HADOOP_CLASSPATH" ]; then
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
    else
    export HADOOP_CLASSPATH=$f
    fi
    done

    # The maximum amount of heap to use, in MB. Default is 1000.
    #export HADOOP_HEAPSIZE=
    #export HADOOP_NAMENODE_INIT_HEAPSIZE=""

    # Extra Java runtime options. Empty by default.
    export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"

    # Command specific options appended to HADOOP_OPTS when specified
    export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
    export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"

    export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"

    export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
    export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"

    # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
    export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
    #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"

    # On secure datanodes, user to run the datanode as after dropping privileges.
    # This **MUST** be uncommented to enable secure HDFS if using privileged ports
    # to provide authentication of data transfer protocol. This **MUST NOT** be
    # defined if SASL is configured for authentication of data transfer protocol
    # using non-privileged ports.
    #export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
    export HADOOP_SECURE_DN_USER=root

    # Where log files are stored. $HADOOP_HOME/logs by default.
    #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER

    # Where log files are stored in the secure data environment.
    export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}

    ###
    # HDFS Mover specific parameters
    ###
    # Specify the JVM options to be used when starting the HDFS Mover.
    # These options will be appended to the options specified as HADOOP_OPTS
    # and therefore may override any similar flags set in HADOOP_OPTS
    #
    # export HADOOP_MOVER_OPTS=""

    ###
    # Advanced Users Only!
    ###

    # The directory where pid files are stored. /tmp by default.
    # NOTE: this should be set to a directory that can only be written to by
    # the user that will run the hadoop daemons. Otherwise there is the
    # potential for a symlink attack.
    export HADOOP_PID_DIR=${HADOOP_PID_DIR}
    export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}

    # A string representing this instance of hadoop. $USER by default.
    export HADOOP_IDENT_STRING=$USER

    export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native


    vim slaves
    dontdelete-master2
    dontdelete-master1
    dontdelete-core2
    dontdelete-core1

    vim mapred-site.xml
    <?xml version="1.0"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <!--
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License. See accompanying LICENSE file.
    -->

    <!-- Put site-specific property overrides in this file. -->

    <configuration>
    <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    </property>
    <!-- MapReduce JobHistory Server address, default port 10020 -->
    <property>
    <name>mapreduce.jobhistory.address</name>
    <value>0.0.0.0:10020</value>
    </property>
    <!-- MapReduce JobHistory Server web UI address, default port 19888 -->
    <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>0.0.0.0:19888</value>
    </property>
    <property>
    <name>mapreduce.map.output.compress</name>
    <value>true</value>
    </property>
    <property>
    <name>mapreduce.map.output.compress.codec</name>
    <value>com.hadoop.compression.lzo.LzoCodec</value>
    </property>
    <!-- MapReduce JobHistory Server security configs -->
    <property>
    <name>mapreduce.jobhistory.keytab</name>
    <value>/root/mapred.keytab</value>
    </property>
    <property>
    <name>mapreduce.jobhistory.principal</name>
    <value>mapred/_HOST@EXAMPLE.COM</value>
    </property>
    </configuration>


    vim yarn-site.xml
    <?xml version="1.0"?>
    <!--
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License. See accompanying LICENSE file.
    -->
    <configuration>
    <!-- enable ResourceManager HA (default: false) -->
    <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
    </property>
    <!-- enable automatic recovery -->
    <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
    </property>
    <!-- cluster id of the ResourceManagers -->
    <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>yrc</value>
    </property>
    <!-- logical ids of the ResourceManagers -->
    <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
    </property>
    <!-- hostnames of the two ResourceManagers -->
    <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>dontdelete-master2</value>
    </property>
    <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>dontdelete-master1</value>
    </property>
    <!-- <property> <name>yarn.resourcemanager.ha.id</name> <value>rm1</value>
    <description>If we want to launch more than one RM in single node,we need
    this configuration</description> </property> -->
    <!-- ZooKeeper quorum address -->
    <property>
    <name>ha.zookeeper.quorum</name>
    <value>dontdelete-master2:2181,dontdelete-master1:2181,dontdelete-core2:2181,dontdelete-core1:2181</value>
    </property>
    <!-- ZooKeeper connection address for the RM state store -->
    <property>
    <name>yarn.resourcemanager.zk-state-store.address</name>
    <value>dontdelete-master2:2181,dontdelete-master1:2181,dontdelete-core2:2181,dontdelete-core1:2181</value>
    </property>
    <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
    </property>
    <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>dontdelete-master2:2181,dontdelete-master1:2181,dontdelete-core2:2181,dontdelete-core1:2181</value>
    </property>
    <property>
    <name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name>
    <value>/yarn-leader-election</value>
    <description>Optional setting. The default value is /yarn-leader-election</description>
    </property>
    <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
    </property>

    <!-- ResourceManager security configs -->
    <property>
    <name>yarn.resourcemanager.keytab</name>
    <value>/root/yarn.keytab</value>
    </property>
    <property>
    <name>yarn.resourcemanager.principal</name>
    <value>yarn/_HOST@EXAMPLE.COM</value>
    </property>
    <property>
    <name>yarn.resourcemanager.kerberos.https.principal</name>
    <value>HTTP/_HOST@EXAMPLE.COM</value>
    </property>
    <!-- NodeManager security configs -->
    <property>
    <name>yarn.nodemanager.keytab</name>
    <value>/root/yarn.keytab</value>
    </property>
    <property>
    <name>yarn.nodemanager.principal</name>
    <value>yarn/_HOST@EXAMPLE.COM</value>
    </property>
    <property>
    <name>yarn.nodemanager.container-executor.class</name>
    <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
    </property>
    <property>
    <name>yarn.nodemanager.linux-container-executor.group</name>
    <value>hdfs</value>
    </property>
    </configuration>
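
    Note: the yarn.keytab/yarn principals referenced here, and the mapred.keytab/mapred principal referenced in mapred-site.xml above, were never created in the Kerberos section. If these settings are kept, the principals and keytabs have to be added first; a sketch in kadmin.local, following the same pattern as the earlier ones:

    addprinc -pw root yarn/dontdelete-master2
    addprinc -pw root yarn/dontdelete-master1
    addprinc -pw root yarn/dontdelete-core2
    addprinc -pw root yarn/dontdelete-core1
    addprinc -pw root mapred/dontdelete-master2
    xst -k yarn.keytab yarn/dontdelete-master2
    xst -k yarn.keytab yarn/dontdelete-master1
    xst -k yarn.keytab yarn/dontdelete-core2
    xst -k yarn.keytab yarn/dontdelete-core1
    xst -k mapred.keytab mapred/dontdelete-master2
    #copy yarn.keytab to /root on every node and mapred.keytab to the JobHistory host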

    vim container-executor.cfg
    yarn.nodemanager.linux-container-executor.group=yarn#configured value of yarn.nodemanager.linux-container-executor.group
    banned.users=bin#comma separated list of users who can not run applications
    min.user.id=1#Prevent other super-users
    allowed.system.users=root,yarn,hdfs,mapred,nobody##comma separated list of system users who CAN run applications
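
    Note: yarn-site.xml above sets yarn.nodemanager.linux-container-executor.group to hdfs, while this file sets it to yarn; the two values must match, which may be one reason the YARN setup is marked as not working yet. The container-executor binary and this file also need strict ownership and permissions. A hedged sketch, assuming the Hadoop 2.6.5 layout under /root/src used elsewhere in this post:

    #container-executor must be owned by root, group-owned by the configured group, and setuid;
    #the cfg file must be owned by root and not writable by group/other
    chown root:yarn /root/src/hadoop-2.6.5/bin/container-executor
    chmod 6050 /root/src/hadoop-2.6.5/bin/container-executor
    chown root:yarn /root/src/hadoop-2.6.5/etc/hadoop/container-executor.cfg
    chmod 0400 /root/src/hadoop-2.6.5/etc/hadoop/container-executor.cfg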


    Distribute all the configuration files to every node and start the cluster:
    $HADOOP_HOME/sbin/start-all.sh
    $HADOOP_HOME/sbin/start-secure-dns.sh
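
    Once the daemons are up, HDFS access should require a valid ticket. A quick check (principal and keytab follow the naming used above):

    #without a ticket this should fail with a GSSException / "no valid credentials"
    kdestroy
    hdfs dfs -ls /
    #with a ticket it should succeed
    kinit -kt /root/root.keytab root/dontdelete-master2
    hdfs dfs -ls /
    #check the HA state of both NameNodes
    hdfs haadmin -getServiceState nn1
    hdfs haadmin -getServiceState nn2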


    #HBase configuration
    vim hbase-site.xml
    <?xml version="1.0"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <!--
    /**
    *
    * Licensed to the Apache Software Foundation (ASF) under one
    * or more contributor license agreements. See the NOTICE file
    * distributed with this work for additional information
    * regarding copyright ownership. The ASF licenses this file
    * to you under the Apache License, Version 2.0 (the
    * "License"); you may not use this file except in compliance
    * with the License. You may obtain a copy of the License at
    *
    * http://www.apache.org/licenses/LICENSE-2.0
    *
    * Unless required by applicable law or agreed to in writing, software
    * distributed under the License is distributed on an "AS IS" BASIS,
    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    * See the License for the specific language governing permissions and
    * limitations under the License.
    */
    -->
    <configuration>
    <property>
    <name>hbase.rootdir</name>
    <!-- <value>hdfs://elasticsearch:9000/hbase</value> -->
    <value>hdfs://ns1/hbase</value>
    </property>
    <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
    </property>
    <!-- <property>
    <name>hbase.master</name>
    <value>60000</value>
    </property>
    <property>
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2181</value>
    </property>
    <property>
    <name>hbase.master.info.port</name>
    <value>60010</value>
    </property> -->
    <property>
    <name>hbase.zookeeper.quorum</name>
    <value>dontdelete-master2:2181,dontdelete-master1:2181,dontdelete-core2:2181,dontdelete-core1:2181</value>
    </property>
    <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/data/zk/data</value>
    </property>
    <!--hbase security-->
    <property>
    <name>hbase.security.authentication</name>
    <value>kerberos</value>
    </property>
    <property>
    <name>hbase.rpc.engine</name>
    <value>org.apache.hadoop.hbase.ipc.SecureRpcEngine</value>
    </property>
    <property>
    <name>hbase.regionserver.kerberos.principal</name>
    <value>root/_HOST@EXAMPLE.COM</value>
    </property>
    <property>
    <name>hbase.regionserver.keytab.file</name>
    <value>/root/root.keytab</value>
    </property>
    <property>
    <name>hbase.master.kerberos.principal</name>
    <value>root/_HOST@EXAMPLE.COM</value>
    </property>
    <property>
    <name>hbase.master.keytab.file</name>
    <value>/root/root.keytab</value>
    </property>
    <!--acl-->
    <property>
    <name>hbase.superuser</name>
    <value>hbase</value>
    </property>
    <property>
    <name>hbase.security.authorization</name>
    <value>true</value>
    </property>
    <property>
    <name>hbase.coprocessor.master.classes</name>
    <value>org.apache.hadoop.hbase.security.access.AccessController</value>
    </property>
    <property>
    <name>hbase.coprocessor.regionserver.classes</name>
    <value>org.apache.hadoop.hbase.security.access.AccessController</value>
    </property>
    <property>
    <name>hbase.coprocessor.region.classes</name>
    <value>org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController</value>
    </property>

    </configuration>

    vim backup-masters
    dontdelete-master1

    vim regionservers
    dontdelete-core2
    dontdelete-core1

    vim hbase-env.sh
    #
    #/**
    # * Licensed to the Apache Software Foundation (ASF) under one
    # * or more contributor license agreements. See the NOTICE file
    # * distributed with this work for additional information
    # * regarding copyright ownership. The ASF licenses this file
    # * to you under the Apache License, Version 2.0 (the
    # * "License"); you may not use this file except in compliance
    # * with the License. You may obtain a copy of the License at
    # *
    # * http://www.apache.org/licenses/LICENSE-2.0
    # *
    # * Unless required by applicable law or agreed to in writing, software
    # * distributed under the License is distributed on an "AS IS" BASIS,
    # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # * See the License for the specific language governing permissions and
    # * limitations under the License.
    # */

    # Set environment variables here.

    # This script sets variables multiple times over the course of starting an hbase process,
    # so try to keep things idempotent unless you want to take an even deeper look
    # into the startup scripts (bin/hbase, etc.)

    # The java implementation to use. Java 1.7+ required.
    export JAVA_HOME=/usr/java/jdk1.8.0_171-amd64

    # Extra Java CLASSPATH elements. Optional.
    # export HBASE_CLASSPATH=

    # The maximum amount of heap to use. Default is left to JVM default.
    # export HBASE_HEAPSIZE=1G

    # Uncomment below if you intend to use off heap cache. For example, to allocate 8G of
    # offheap, set the value to "8G".
    # export HBASE_OFFHEAPSIZE=1G

    # Extra Java runtime options.
    # Below are what we set by default. May only work with SUN JVM.
    # For more on why as well as other possible settings,
    # see http://wiki.apache.org/hadoop/PerformanceTuning
    export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
    #export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 "
    export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config=$HBASE_HOME/conf/zk-jaas.conf"


    # Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+
    export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
    export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"

    # Uncomment one of the below three options to enable java garbage collection logging for the server-side processes.

    # This enables basic gc logging to the .out file.
    # export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"

    # This enables basic gc logging to its own file.
    # If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
    # export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"

    # This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
    # If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
    # export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"

    # Uncomment one of the below three options to enable java garbage collection logging for the client processes.

    # This enables basic gc logging to the .out file.
    # export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"

    # This enables basic gc logging to its own file.
    # If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
    # export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"

    # This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
    # If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
    # export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"

    # See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations
    # needed setting up off-heap block caching.

    # Uncomment and adjust to enable JMX exporting
    # See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
    # More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
    # NOTE: HBase provides an alternative JMX implementation to fix the random ports issue, please see JMX
    # section in HBase Reference Guide for instructions.

    # export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    # export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101"
    # export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
    # export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
    # export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
    # export HBASE_REST_OPTS="$HBASE_REST_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10105"

    # File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
    # export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers

    # Uncomment and adjust to keep all the Region Server pages mapped to be memory resident
    #HBASE_REGIONSERVER_MLOCK=true
    #HBASE_REGIONSERVER_UID="hbase"

    # File naming hosts on which backup HMaster will run. $HBASE_HOME/conf/backup-masters by default.
    # export HBASE_BACKUP_MASTERS=${HBASE_HOME}/conf/backup-masters

    # Extra ssh options. Empty by default.
    # export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"

    # Where log files are stored. $HBASE_HOME/logs by default.
    # export HBASE_LOG_DIR=${HBASE_HOME}/logs

    # Enable remote JDWP debugging of major HBase processes. Meant for Core Developers
    # export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
    # export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
    # export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
    # export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"

    # A string representing this instance of hbase. $USER by default.
    # export HBASE_IDENT_STRING=$USER

    # The scheduling priority for daemon processes. See 'man nice'.
    # export HBASE_NICENESS=10

    # The directory where pid files are stored. /tmp by default.
    # export HBASE_PID_DIR=/var/hadoop/pids

    # Seconds to sleep between slave commands. Unset by default. This
    # can be useful in large clusters, where, e.g., slave rsyncs can
    # otherwise arrive faster than the master can service them.
    # export HBASE_SLAVE_SLEEP=0.1

    # Tell HBase whether it should manage it's own instance of Zookeeper or not.
    export HBASE_MANAGES_ZK=false

    # The default log rolling policy is RFA, where the log file is rolled as per the size defined for the
    # RFA appender. Please refer to the log4j.properties file to see more details on this appender.
    # In case one needs to do log rolling on a date change, one should set the environment property
    # HBASE_ROOT_LOGGER to "<DESIRED_LOG LEVEL>,DRFA".
    # For example:
    # HBASE_ROOT_LOGGER=INFO,DRFA
    # The reason for changing default to RFA is to avoid the boundary case of filling out disk space as
    # DRFA doesn't put any cap on the log size. Please refer to HBase-5655 for more context.
    export HBASE_LIBRARY_PATH=$HBASE_LIBRARY_PATH:$HBASE_HOME/lib/native/:/usr/local/lib/


    vim zk-jaas.conf
    Client {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    useTicketCache=false
    keyTab="/root/hbase.keytab"
    principal="hbase/dontdelete-master2@EXAMPLE.COM";
    };
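
    Note: this JAAS file references an hbase principal and /root/hbase.keytab, which were not created in the Kerberos section above. A sketch of the corresponding kadmin.local steps, following the same pattern as the earlier principals (alternatively, reuse the existing root principals/keytab and adjust this file accordingly):

    addprinc -pw root hbase/dontdelete-master2
    addprinc -pw root hbase/dontdelete-master1
    addprinc -pw root hbase/dontdelete-core2
    addprinc -pw root hbase/dontdelete-core1
    xst -k hbase.keytab hbase/dontdelete-master2
    xst -k hbase.keytab hbase/dontdelete-master1
    xst -k hbase.keytab hbase/dontdelete-core2
    xst -k hbase.keytab hbase/dontdelete-core1
    #copy hbase.keytab to /root on every HBase node and, as with jaas.conf, use each node's own principal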


    Distribute the configuration files to every node (adjust the principal in zk-jaas.conf to each node's own hostname) and start HBase:
    $HBASE_HOME/bin/start-hbase.sh
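
    With hbase.security.authorization and the AccessController coprocessor enabled, permissions must be granted explicitly. A hedged check from the shell (table and user names below are purely illustrative):

    #a ticket is required before the shell can reach the secure cluster
    kinit -kt /root/root.keytab root/dontdelete-master2
    hbase shell
    #inside the shell:
    #  status                        -- master and region servers should be reported
    #  create 't1', 'cf'             -- requires sufficient privileges
    #  grant 'someuser', 'RW', 't1'  -- grant read/write on t1 to a (hypothetical) user
    #  user_permission 't1'          -- list the ACLs on t1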

    References:
    https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/SecureMode.html
    https://ambari.apache.org/1.2.5/installing-hadoop-using-ambari/content/ambari-kerb-1-4.html
    https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.5/bk_security/content/kerb-config-jaas.html
    https://www.cloudera.com/documentation/enterprise/5-7-x/topics/cdh_sg_hbase_authentication.html
    https://yq.aliyun.com/articles/25636

    #The YARN and MapReduce settings above do not work properly yet; to be improved.
