  • Hadoop configuration parameter quick reference (a minimal override sketch follows the list)

    dfs.block.access.key.update.interval=600
    dfs.block.access.token.enable=false
    dfs.block.access.token.lifetime=600
    dfs.blockreport.initialDelay=0
    dfs.blockreport.intervalMsec=21600000
    dfs.blockreport.split.threshold=1000000
    dfs.blocksize=134217728
    dfs.bytes-per-checksum=512
    dfs.cachereport.intervalMsec=10000
    dfs.client-write-packet-size=65536
    dfs.client.block.write.replace-datanode-on-failure.enable=true
    dfs.client.block.write.replace-datanode-on-failure.policy=DEFAULT
    dfs.client.block.write.retries=3
    dfs.client.cached.conn.retry=3
    dfs.client.context=default
    dfs.client.datanode-restart.timeout=30
    dfs.client.failover.connection.retries=0
    dfs.client.failover.connection.retries.on.timeouts=0
    dfs.client.failover.max.attempts=15
    dfs.client.failover.sleep.base.millis=500
    dfs.client.failover.sleep.max.millis=15000
    dfs.client.file-block-storage-locations.num-threads=10
    dfs.client.file-block-storage-locations.timeout.millis=1000
    dfs.client.https.keystore.resource=ssl-client.xml
    dfs.client.https.need-auth=false
    dfs.client.mmap.cache.size=256
    dfs.client.mmap.cache.timeout.ms=3600000
    dfs.client.mmap.enabled=true
    dfs.client.mmap.retry.timeout.ms=300000
    dfs.client.short.circuit.replica.stale.threshold.ms=1800000
    dfs.client.use.datanode.hostname=false
    dfs.client.write.exclude.nodes.cache.expiry.interval.millis=600000
    dfs.datanode.address=0.0.0.0:50010
    dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction=0.75f
    dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold=10737418240
    dfs.datanode.balance.bandwidthPerSec=1048576
    dfs.datanode.data.dir=file://${hadoop.tmp.dir}/dfs/data
    dfs.datanode.data.dir.perm=700
    dfs.datanode.directoryscan.interval=21600
    dfs.datanode.directoryscan.threads=1
    dfs.datanode.dns.interface=default
    dfs.datanode.dns.nameserver=default
    dfs.datanode.drop.cache.behind.reads=false
    dfs.datanode.drop.cache.behind.writes=false
    dfs.datanode.du.reserved=0
    dfs.datanode.failed.volumes.tolerated=0
    dfs.datanode.fsdatasetcache.max.threads.per.volume=4
    dfs.datanode.handler.count=10
    dfs.datanode.hdfs-blocks-metadata.enabled=false
    dfs.datanode.http.address=0.0.0.0:50075
    dfs.datanode.https.address=0.0.0.0:50475
    dfs.datanode.ipc.address=0.0.0.0:50020
    dfs.datanode.max.locked.memory=0
    dfs.datanode.max.transfer.threads=4096
    dfs.datanode.readahead.bytes=4193404
    dfs.datanode.shared.file.descriptor.paths=/dev/shm,/tmp
    dfs.datanode.sync.behind.writes=false
    dfs.datanode.use.datanode.hostname=false
    dfs.default.chunk.view.size=32768
    dfs.encrypt.data.transfer=false
    dfs.ha.automatic-failover.enabled=false
    dfs.ha.fencing.ssh.connect-timeout=30000
    dfs.ha.log-roll.period=120
    dfs.ha.tail-edits.period=60
    dfs.heartbeat.interval=3
    dfs.http.policy=HTTP_ONLY
    dfs.https.enable=false
    dfs.https.server.keystore.resource=ssl-server.xml
    dfs.image.compress=false
    dfs.image.compression.codec=org.apache.hadoop.io.compress.DefaultCodec
    dfs.image.transfer.bandwidthPerSec=0
    dfs.image.transfer.chunksize=65536
    dfs.image.transfer.timeout=60000
    dfs.journalnode.http-address=0.0.0.0:8480
    dfs.journalnode.https-address=0.0.0.0:8481
    dfs.journalnode.rpc-address=0.0.0.0:8485
    dfs.namenode.accesstime.precision=3600000
    dfs.namenode.acls.enabled=false
    dfs.namenode.audit.loggers=default
    dfs.namenode.avoid.read.stale.datanode=false
    dfs.namenode.avoid.write.stale.datanode=false
    dfs.namenode.backup.address=0.0.0.0:50100
    dfs.namenode.backup.http-address=0.0.0.0:50105
    dfs.namenode.checkpoint.check.period=60
    dfs.namenode.checkpoint.dir=file://${hadoop.tmp.dir}/dfs/namesecondary
    dfs.namenode.checkpoint.edits.dir=${dfs.namenode.checkpoint.dir}
    dfs.namenode.checkpoint.max-retries=3
    dfs.namenode.checkpoint.period=3600
    dfs.namenode.checkpoint.txns=1000000
    dfs.namenode.datanode.registration.ip-hostname-check=true
    dfs.namenode.decommission.interval=30
    dfs.namenode.decommission.nodes.per.interval=5
    dfs.namenode.delegation.key.update-interval=86400000
    dfs.namenode.delegation.token.max-lifetime=604800000
    dfs.namenode.delegation.token.renew-interval=86400000
    dfs.namenode.edit.log.autoroll.check.interval.ms=300000
    dfs.namenode.edit.log.autoroll.multiplier.threshold=2.0
    dfs.namenode.edits.dir=${dfs.namenode.name.dir}
    dfs.namenode.edits.journal-plugin.qjournal=org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager
    dfs.namenode.edits.noeditlogchannelflush=false
    dfs.namenode.enable.retrycache=true
    dfs.namenode.fs-limits.max-blocks-per-file=1048576
    dfs.namenode.fs-limits.max-component-length=255
    dfs.namenode.fs-limits.max-directory-items=1048576
    dfs.namenode.fs-limits.min-block-size=0
    dfs.namenode.handler.count=10
    dfs.namenode.http-address=0.0.0.0:50070
    dfs.namenode.https-address=0.0.0.0:50470
    dfs.namenode.invalidate.work.pct.per.iteration=0.32f
    dfs.namenode.kerberos.internal.spnego.principal=${dfs.web.authentication.kerberos.principal}
    dfs.namenode.list.cache.directives.num.responses=100
    dfs.namenode.list.cache.pools.num.responses=100
    dfs.namenode.logging.level=info
    dfs.namenode.max.extra.edits.segments.retained=10000
    dfs.namenode.max.objects=0
    dfs.namenode.name.dir=file://${hadoop.tmp.dir}/dfs/name
    dfs.namenode.name.dir.restore=false
    dfs.namenode.num.checkpoints.retained=2
    dfs.namenode.num.extra.edits.retained=1000000
    dfs.namenode.path.based.cache.block.map.allocation.percent=0.25
    dfs.namenode.path.based.cache.refresh.interval.ms=30000
    dfs.namenode.path.based.cache.retry.interval.ms=30000
    dfs.namenode.reject-unresolved-dn-topology-mapping=false
    dfs.namenode.replication.considerLoad=true
    dfs.namenode.replication.interval=3
    dfs.namenode.replication.min=1
    dfs.namenode.replication.work.multiplier.per.iteration=2
    dfs.namenode.retrycache.expirytime.millis=600000
    dfs.namenode.retrycache.heap.percent=0.03f
    dfs.namenode.safemode.extension=30000
    dfs.namenode.safemode.min.datanodes=0
    dfs.namenode.safemode.threshold-pct=0.999f
    dfs.namenode.secondary.http-address=0.0.0.0:50090
    dfs.namenode.secondary.https-address=0.0.0.0:50091
    dfs.namenode.stale.datanode.interval=30000
    dfs.namenode.support.allow.format=true
    dfs.namenode.write.stale.datanode.ratio=0.5f
    dfs.permissions.enabled=true
    dfs.permissions.superusergroup=supergroup
    dfs.replication=3
    dfs.replication.max=512
    dfs.secondary.namenode.kerberos.internal.spnego.principal=${dfs.web.authentication.kerberos.principal}
    dfs.short.circuit.shared.memory.watcher.interrupt.check.ms=60000
    dfs.stream-buffer-size=4096
    dfs.support.append=true
    dfs.webhdfs.enabled=true
    dfs.webhdfs.user.provider.user.pattern=^[A-Za-z_][A-Za-z0-9._-]*[$]?$
    file.blocksize=67108864
    file.bytes-per-checksum=512
    file.client-write-packet-size=65536
    file.replication=1
    file.stream-buffer-size=4096
    fs.AbstractFileSystem.file.impl=org.apache.hadoop.fs.local.LocalFs
    fs.AbstractFileSystem.hdfs.impl=org.apache.hadoop.fs.Hdfs
    fs.AbstractFileSystem.viewfs.impl=org.apache.hadoop.fs.viewfs.ViewFs
    fs.automatic.close=true
    fs.client.resolve.remote.symlinks=true
    fs.defaultFS=file:///
    fs.df.interval=60000
    fs.du.interval=600000
    fs.ftp.host=0.0.0.0
    fs.ftp.host.port=21
    fs.ftp.password.localhost=password
    fs.ftp.user.localhost=user
    fs.permissions.umask-mode=022
    fs.s3.block.size=128
    fs.s3.buffer.dir=${hadoop.tmp.dir}/s3
    fs.s3.maxRetries=4
    fs.s3.sleepTimeSeconds=10
    fs.s3n.block.size=67108864
    fs.s3n.multipart.copy.block.size=5368709120
    fs.s3n.multipart.uploads.block.size=67108864
    fs.s3n.multipart.uploads.enabled=false
    fs.swift.impl=org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem
    fs.trash.checkpoint.interval=0
    fs.trash.interval=0
    ftp.blocksize=67108864
    ftp.bytes-per-checksum=512
    ftp.client-write-packet-size=65536
    ftp.replication=3
    ftp.stream-buffer-size=4096
    ha.failover-controller.cli-check.rpc-timeout.ms=20000
    ha.failover-controller.graceful-fence.connection.retries=1
    ha.failover-controller.graceful-fence.rpc-timeout.ms=5000
    ha.failover-controller.new-active.rpc-timeout.ms=60000
    ha.health-monitor.check-interval.ms=1000
    ha.health-monitor.connect-retry-interval.ms=1000
    ha.health-monitor.rpc-timeout.ms=45000
    ha.health-monitor.sleep-after-disconnect.ms=1000
    ha.zookeeper.acl=world:anyone:rwcda
    ha.zookeeper.parent-znode=/hadoop-ha
    ha.zookeeper.session-timeout.ms=5000
    hadoop.common.configuration.version=0.23.0
    hadoop.fuse.connection.timeout=300
    hadoop.fuse.timer.period=5
    hadoop.hdfs.configuration.version=1
    hadoop.http.authentication.kerberos.keytab=${user.home}/hadoop.keytab
    hadoop.http.authentication.kerberos.principal=HTTP/_HOST@LOCALHOST
    hadoop.http.authentication.signature.secret.file=${user.home}/hadoop-http-auth-signature-secret
    hadoop.http.authentication.simple.anonymous.allowed=true
    hadoop.http.authentication.token.validity=36000
    hadoop.http.authentication.type=simple
    hadoop.http.filter.initializers=org.apache.hadoop.http.lib.StaticUserWebFilter
    hadoop.http.staticuser.user=dr.who
    hadoop.jetty.logs.serve.aliases=true
    hadoop.kerberos.kinit.command=kinit
    hadoop.rpc.protection=authentication
    hadoop.rpc.socket.factory.class.default=org.apache.hadoop.net.StandardSocketFactory
    hadoop.security.authentication=simple
    hadoop.security.authorization=false
    hadoop.security.group.mapping=org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback
    hadoop.security.group.mapping.ldap.directory.search.timeout=10000
    hadoop.security.group.mapping.ldap.search.attr.group.name=cn
    hadoop.security.group.mapping.ldap.search.attr.member=member
    hadoop.security.group.mapping.ldap.search.filter.group=(objectClass=group)
    hadoop.security.group.mapping.ldap.search.filter.user=(&(objectClass=user)(sAMAccountName={0}))
    hadoop.security.group.mapping.ldap.ssl=false
    hadoop.security.groups.cache.secs=300
    hadoop.security.groups.cache.warn.after.ms=5000
    hadoop.security.instrumentation.requires.admin=false
    hadoop.security.token.service.use_ip=false
    hadoop.security.uid.cache.secs=14400
    hadoop.ssl.client.conf=ssl-client.xml
    hadoop.ssl.enabled=false
    hadoop.ssl.hostname.verifier=DEFAULT
    hadoop.ssl.keystores.factory.class=org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory
    hadoop.ssl.require.client.cert=false
    hadoop.ssl.server.conf=ssl-server.xml
    hadoop.tmp.dir=build/test
    hadoop.user.group.static.mapping.overrides=dr.who=;
    hadoop.util.hash.type=murmur
    hadoop.work.around.non.threadsafe.getpwuid=false
    io.bytes.per.checksum=512
    io.compression.codec.bzip2.library=system-native
    io.file.buffer.size=4096
    io.map.index.interval=128
    io.map.index.skip=0
    io.mapfile.bloom.error.rate=0.005
    io.mapfile.bloom.size=1048576
    io.native.lib.available=true
    io.seqfile.compress.blocksize=1000000
    io.seqfile.lazydecompress=true
    io.seqfile.local.dir=${hadoop.tmp.dir}/io/local
    io.seqfile.sorter.recordlimit=1000000
    io.serializations=org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization
    io.skip.checksum.errors=false
    ipc.client.connect.max.retries=10
    ipc.client.connect.max.retries.on.timeouts=45
    ipc.client.connect.retry.interval=1000
    ipc.client.connect.timeout=20000
    ipc.client.connection.maxidletime=10000
    ipc.client.fallback-to-simple-auth-allowed=false
    ipc.client.idlethreshold=4000
    ipc.client.kill.max=10
    ipc.client.tcpnodelay=false
    ipc.server.listen.queue.size=128
    ipc.server.tcpnodelay=false
    map.sort.class=org.apache.hadoop.util.QuickSort
    mapred.child.java.opts=-Xmx200m
    mapreduce.am.max-attempts=2
    mapreduce.app-submission.cross-platform=false
    mapreduce.client.completion.pollinterval=5000
    mapreduce.client.genericoptionsparser.used=true
    mapreduce.client.output.filter=FAILED
    mapreduce.client.progressmonitor.pollinterval=1000
    mapreduce.client.submit.file.replication=10
    mapreduce.cluster.acls.enabled=false
    mapreduce.cluster.local.dir=${hadoop.tmp.dir}/mapred/local
    mapreduce.cluster.temp.dir=${hadoop.tmp.dir}/mapred/temp
    mapreduce.framework.name=local
    mapreduce.ifile.readahead=true
    mapreduce.ifile.readahead.bytes=4194304
    mapreduce.input.fileinputformat.list-status.num-threads=1
    mapreduce.input.fileinputformat.split.minsize=0
    mapreduce.job.acl-modify-job= 
    mapreduce.job.acl-view-job= 
    mapreduce.job.classloader=false
    mapreduce.job.classloader.system.classes=java.,javax.,org.apache.commons.logging.,org.apache.log4j.,org.apache.hadoop.
    mapreduce.job.committer.setup.cleanup.needed=true
    mapreduce.job.complete.cancel.delegation.tokens=true
    mapreduce.job.counters.limit=120
    mapreduce.job.end-notification.max.attempts=5
    mapreduce.job.end-notification.max.retry.interval=5000
    mapreduce.job.end-notification.retry.attempts=0
    mapreduce.job.end-notification.retry.interval=1000
    mapreduce.job.hdfs-servers=${fs.defaultFS}
    mapreduce.job.jvm.numtasks=1
    mapreduce.job.map.output.collector.class=org.apache.hadoop.mapred.MapTask$MapOutputBuffer
    mapreduce.job.maps=2
    mapreduce.job.max.split.locations=10
    mapreduce.job.maxtaskfailures.per.tracker=3
    mapreduce.job.queuename=default
    mapreduce.job.reduce.shuffle.consumer.plugin.class=org.apache.hadoop.mapreduce.task.reduce.Shuffle
    mapreduce.job.reduce.slowstart.completedmaps=0.05
    mapreduce.job.reduces=1
    mapreduce.job.speculative.slownodethreshold=1.0
    mapreduce.job.speculative.slowtaskthreshold=1.0
    mapreduce.job.speculative.speculativecap=0.1
    mapreduce.job.split.metainfo.maxsize=10000000
    mapreduce.job.token.tracking.ids.enabled=false
    mapreduce.job.ubertask.enable=false
    mapreduce.job.ubertask.maxmaps=9
    mapreduce.job.ubertask.maxreduces=1
    mapreduce.job.userlog.retain.hours=24
    mapreduce.jobhistory.address=0.0.0.0:10020
    mapreduce.jobhistory.admin.acl=*
    mapreduce.jobhistory.admin.address=0.0.0.0:10033
    mapreduce.jobhistory.cleaner.enable=true
    mapreduce.jobhistory.cleaner.interval-ms=86400000
    mapreduce.jobhistory.client.thread-count=10
    mapreduce.jobhistory.datestring.cache.size=200000
    mapreduce.jobhistory.done-dir=${yarn.app.mapreduce.am.staging-dir}/history/done
    mapreduce.jobhistory.http.policy=HTTP_ONLY
    mapreduce.jobhistory.intermediate-done-dir=${yarn.app.mapreduce.am.staging-dir}/history/done_intermediate
    mapreduce.jobhistory.joblist.cache.size=20000
    mapreduce.jobhistory.keytab=/etc/security/keytab/jhs.service.keytab
    mapreduce.jobhistory.loadedjobs.cache.size=5
    mapreduce.jobhistory.max-age-ms=604800000
    mapreduce.jobhistory.minicluster.fixed.ports=false
    mapreduce.jobhistory.move.interval-ms=180000
    mapreduce.jobhistory.move.thread-count=3
    mapreduce.jobhistory.principal=jhs/_HOST@REALM.TLD
    mapreduce.jobhistory.recovery.enable=false
    mapreduce.jobhistory.recovery.store.class=org.apache.hadoop.mapreduce.v2.hs.HistoryServerFileSystemStateStoreService
    mapreduce.jobhistory.recovery.store.fs.uri=${hadoop.tmp.dir}/mapred/history/recoverystore
    mapreduce.jobhistory.webapp.address=0.0.0.0:19888
    mapreduce.jobtracker.address=local
    mapreduce.jobtracker.expire.trackers.interval=600000
    mapreduce.jobtracker.handler.count=10
    mapreduce.jobtracker.heartbeats.in.second=100
    mapreduce.jobtracker.http.address=0.0.0.0:50030
    mapreduce.jobtracker.instrumentation=org.apache.hadoop.mapred.JobTrackerMetricsInst
    mapreduce.jobtracker.jobhistory.block.size=3145728
    mapreduce.jobtracker.jobhistory.lru.cache.size=5
    mapreduce.jobtracker.jobhistory.task.numberprogresssplits=12
    mapreduce.jobtracker.maxtasks.perjob=-1
    mapreduce.jobtracker.persist.jobstatus.active=true
    mapreduce.jobtracker.persist.jobstatus.dir=/jobtracker/jobsInfo
    mapreduce.jobtracker.persist.jobstatus.hours=1
    mapreduce.jobtracker.restart.recover=false
    mapreduce.jobtracker.retiredjobs.cache.size=1000
    mapreduce.jobtracker.staging.root.dir=${hadoop.tmp.dir}/mapred/staging
    mapreduce.jobtracker.system.dir=${hadoop.tmp.dir}/mapred/system
    mapreduce.jobtracker.taskcache.levels=2
    mapreduce.jobtracker.taskscheduler=org.apache.hadoop.mapred.JobQueueTaskScheduler
    mapreduce.jobtracker.tasktracker.maxblacklists=4
    mapreduce.local.clientfactory.class.name=org.apache.hadoop.mapred.LocalClientFactory
    mapreduce.map.cpu.vcores=1
    mapreduce.map.log.level=INFO
    mapreduce.map.maxattempts=4
    mapreduce.map.output.compress=false
    mapreduce.map.output.compress.codec=org.apache.hadoop.io.compress.DefaultCodec
    mapreduce.map.skip.maxrecords=0
    mapreduce.map.skip.proc.count.autoincr=true
    mapreduce.map.sort.spill.percent=0.80
    mapreduce.map.speculative=true
    mapreduce.output.fileoutputformat.compress=false
    mapreduce.output.fileoutputformat.compress.codec=org.apache.hadoop.io.compress.DefaultCodec
    mapreduce.output.fileoutputformat.compress.type=RECORD
    mapreduce.reduce.cpu.vcores=1
    mapreduce.reduce.input.buffer.percent=0.0
    mapreduce.reduce.log.level=INFO
    mapreduce.reduce.markreset.buffer.percent=0.0
    mapreduce.reduce.maxattempts=4
    mapreduce.reduce.merge.inmem.threshold=1000
    mapreduce.reduce.shuffle.connect.timeout=180000
    mapreduce.reduce.shuffle.input.buffer.percent=0.70
    mapreduce.reduce.shuffle.memory.limit.percent=0.25
    mapreduce.reduce.shuffle.merge.percent=0.66
    mapreduce.reduce.shuffle.parallelcopies=5
    mapreduce.reduce.shuffle.read.timeout=180000
    mapreduce.reduce.shuffle.retry-delay.max.ms=60000
    mapreduce.reduce.skip.maxgroups=0
    mapreduce.reduce.skip.proc.count.autoincr=true
    mapreduce.reduce.speculative=true
    mapreduce.shuffle.connection-keep-alive.enable=false
    mapreduce.shuffle.connection-keep-alive.timeout=5
    mapreduce.shuffle.max.connections=0
    mapreduce.shuffle.max.threads=0
    mapreduce.shuffle.port=13562
    mapreduce.shuffle.ssl.enabled=false
    mapreduce.shuffle.ssl.file.buffer.size=65536
    mapreduce.shuffle.transfer.buffer.size=131072
    mapreduce.task.files.preserve.failedtasks=false
    mapreduce.task.io.sort.factor=10
    mapreduce.task.io.sort.mb=100
    mapreduce.task.merge.progress.records=10000
    mapreduce.task.profile=false
    mapreduce.task.profile.map.params=${mapreduce.task.profile.params}
    mapreduce.task.profile.maps=0-2
    mapreduce.task.profile.reduce.params=${mapreduce.task.profile.params}
    mapreduce.task.profile.reduces=0-2
    mapreduce.task.skip.start.attempts=2
    mapreduce.task.timeout=600000
    mapreduce.task.tmp.dir=./tmp
    mapreduce.task.userlog.limit.kb=0
    mapreduce.tasktracker.dns.interface=default
    mapreduce.tasktracker.dns.nameserver=default
    mapreduce.tasktracker.healthchecker.interval=60000
    mapreduce.tasktracker.healthchecker.script.timeout=600000
    mapreduce.tasktracker.http.address=0.0.0.0:50060
    mapreduce.tasktracker.http.threads=40
    mapreduce.tasktracker.indexcache.mb=10
    mapreduce.tasktracker.instrumentation=org.apache.hadoop.mapred.TaskTrackerMetricsInst
    mapreduce.tasktracker.local.dir.minspacekill=0
    mapreduce.tasktracker.local.dir.minspacestart=0
    mapreduce.tasktracker.map.tasks.maximum=2
    mapreduce.tasktracker.outofband.heartbeat=false
    mapreduce.tasktracker.reduce.tasks.maximum=2
    mapreduce.tasktracker.report.address=127.0.0.1:0
    mapreduce.tasktracker.taskcontroller=org.apache.hadoop.mapred.DefaultTaskController
    mapreduce.tasktracker.taskmemorymanager.monitoringinterval=5000
    mapreduce.tasktracker.tasks.sleeptimebeforesigkill=5000
    net.topology.impl=org.apache.hadoop.net.NetworkTopology
    net.topology.node.switch.mapping.impl=org.apache.hadoop.net.ScriptBasedMapping
    net.topology.script.number.args=100
    nfs3.mountd.port=4272
    nfs3.server.port=2079
    rpc.metrics.quantile.enable=false
    s3.blocksize=67108864
    s3.bytes-per-checksum=512
    s3.client-write-packet-size=65536
    s3.replication=3
    s3.stream-buffer-size=4096
    s3native.blocksize=67108864
    s3native.bytes-per-checksum=512
    s3native.client-write-packet-size=65536
    s3native.replication=3
    s3native.stream-buffer-size=4096
    test.fs.s3.name=s3:///
    test.fs.s3n.name=s3n:///
    tfile.fs.input.buffer.size=262144
    tfile.fs.output.buffer.size=262144
    tfile.io.chunk.size=1048576
    yarn.acl.enable=false
    yarn.admin.acl=*
    yarn.am.liveness-monitor.expiry-interval-ms=600000
    yarn.app.mapreduce.am.command-opts=-Xmx1024m
    yarn.app.mapreduce.am.container.log.backups=0
    yarn.app.mapreduce.am.container.log.limit.kb=0
    yarn.app.mapreduce.am.job.committer.cancel-timeout=60000
    yarn.app.mapreduce.am.job.committer.commit-window=10000
    yarn.app.mapreduce.am.job.task.listener.thread-count=30
    yarn.app.mapreduce.am.resource.cpu-vcores=1
    yarn.app.mapreduce.am.resource.mb=1536
    yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms=1000
    yarn.app.mapreduce.am.staging-dir=/tmp/hadoop-yarn/staging
    yarn.app.mapreduce.client-am.ipc.max-retries=3
    yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts=3
    yarn.app.mapreduce.client.max-retries=3
    yarn.app.mapreduce.task.container.log.backups=0
    yarn.client.application-client-protocol.poll-interval-ms=200
    yarn.client.failover-proxy-provider=org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider
    yarn.client.failover-retries=0
    yarn.client.failover-retries-on-socket-timeouts=0
    yarn.client.max-nodemanagers-proxies=500
    yarn.client.nodemanager-client-async.thread-pool-max-size=500
    yarn.http.policy=HTTP_ONLY
    yarn.ipc.rpc.class=org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC
    yarn.ipc.serializer.type=protocolbuffers
    yarn.log-aggregation-enable=false
    yarn.log-aggregation.retain-check-interval-seconds=-1
    yarn.log-aggregation.retain-seconds=-1
    yarn.nm.liveness-monitor.expiry-interval-ms=600000
    yarn.nodemanager.address=${yarn.nodemanager.hostname}:0
    yarn.nodemanager.admin-env=MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX
    yarn.nodemanager.aux-services.mapreduce_shuffle.class=org.apache.hadoop.mapred.ShuffleHandler
    yarn.nodemanager.container-executor.class=org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor
    yarn.nodemanager.container-manager.thread-count=20
    yarn.nodemanager.container-monitor.interval-ms=3000
    yarn.nodemanager.container-monitor.procfs-tree.smaps-based-rss.enabled=false
    yarn.nodemanager.delete.debug-delay-sec=0
    yarn.nodemanager.delete.thread-count=4
    yarn.nodemanager.disk-health-checker.interval-ms=120000
    yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage=100.0
    yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb=0
    yarn.nodemanager.disk-health-checker.min-healthy-disks=0.25
    yarn.nodemanager.env-whitelist=JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME
    yarn.nodemanager.health-checker.interval-ms=600000
    yarn.nodemanager.health-checker.script.timeout-ms=1200000
    yarn.nodemanager.hostname=0.0.0.0
    yarn.nodemanager.keytab=/etc/krb5.keytab
    yarn.nodemanager.linux-container-executor.cgroups.hierarchy=/hadoop-yarn
    yarn.nodemanager.linux-container-executor.cgroups.mount=false
    yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user=nobody
    yarn.nodemanager.linux-container-executor.nonsecure-mode.user-pattern=^[_.A-Za-z0-9][-@_.A-Za-z0-9]{0,255}?[$]?$
    yarn.nodemanager.linux-container-executor.resources-handler.class=org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler
    yarn.nodemanager.local-cache.max-files-per-directory=8192
    yarn.nodemanager.local-dirs=${hadoop.tmp.dir}/nm-local-dir
    yarn.nodemanager.localizer.address=${yarn.nodemanager.hostname}:8040
    yarn.nodemanager.localizer.cache.cleanup.interval-ms=600000
    yarn.nodemanager.localizer.cache.target-size-mb=10240
    yarn.nodemanager.localizer.client.thread-count=5
    yarn.nodemanager.localizer.fetch.thread-count=4
    yarn.nodemanager.log-aggregation.compression-type=none
    yarn.nodemanager.log-dirs=${yarn.log.dir}/userlogs
    yarn.nodemanager.log.retain-seconds=10800
    yarn.nodemanager.pmem-check-enabled=true
    yarn.nodemanager.process-kill-wait.ms=2000
    yarn.nodemanager.remote-app-log-dir=/tmp/logs
    yarn.nodemanager.remote-app-log-dir-suffix=logs
    yarn.nodemanager.resource.cpu-vcores=8
    yarn.nodemanager.resource.memory-mb=8192
    yarn.nodemanager.resourcemanager.connect.retry_interval.secs=30
    yarn.nodemanager.resourcemanager.connect.wait.secs=900
    yarn.nodemanager.resourcemanager.minimum.version=NONE
    yarn.nodemanager.sleep-delay-before-sigkill.ms=250
    yarn.nodemanager.vmem-check-enabled=true
    yarn.nodemanager.vmem-pmem-ratio=2.1
    yarn.nodemanager.webapp.address=${yarn.nodemanager.hostname}:8042
    yarn.resourcemanager.address=localhost:18032
    yarn.resourcemanager.admin.address=localhost:18033
    yarn.resourcemanager.admin.client.thread-count=1
    yarn.resourcemanager.am.max-attempts=2
    yarn.resourcemanager.amliveliness-monitor.interval-ms=1000
    yarn.resourcemanager.application-tokens.master-key-rolling-interval-secs=86400
    yarn.resourcemanager.client.thread-count=50
    yarn.resourcemanager.configuration.provider-class=org.apache.hadoop.yarn.LocalConfigurationProvider
    yarn.resourcemanager.connect.max-wait.ms=900000
    yarn.resourcemanager.connect.retry-interval.ms=30000
    yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs=86400
    yarn.resourcemanager.container.liveness-monitor.interval-ms=600000
    yarn.resourcemanager.delayed.delegation-token.removal-interval-ms=30000
    yarn.resourcemanager.fs.state-store.retry-policy-spec=2000, 500
    yarn.resourcemanager.fs.state-store.uri=${hadoop.tmp.dir}/yarn/system/rmstore
    yarn.resourcemanager.ha.automatic-failover.embedded=true
    yarn.resourcemanager.ha.automatic-failover.enabled=true
    yarn.resourcemanager.ha.automatic-failover.zk-base-path=/yarn-leader-election
    yarn.resourcemanager.ha.enabled=false
    yarn.resourcemanager.history-writer.multi-threaded-dispatcher.pool-size=10
    yarn.resourcemanager.hostname=0.0.0.0
    yarn.resourcemanager.keytab=/etc/krb5.keytab
    yarn.resourcemanager.max-completed-applications=10000
    yarn.resourcemanager.nm.liveness-monitor.interval-ms=1000
    yarn.resourcemanager.nodemanager.minimum.version=NONE
    yarn.resourcemanager.nodemanagers.heartbeat-interval-ms=1000
    yarn.resourcemanager.recovery.enabled=false
    yarn.resourcemanager.resource-tracker.address=localhost:18031
    yarn.resourcemanager.resource-tracker.client.thread-count=50
    yarn.resourcemanager.scheduler.address=localhost:18030
    yarn.resourcemanager.scheduler.class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler
    yarn.resourcemanager.scheduler.client.thread-count=50
    yarn.resourcemanager.scheduler.monitor.enable=false
    yarn.resourcemanager.scheduler.monitor.policies=org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy
    yarn.resourcemanager.state-store.max-completed-applications=${yarn.resourcemanager.max-completed-applications}
    yarn.resourcemanager.store.class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
    yarn.resourcemanager.webapp.address=localhost:18088
    yarn.resourcemanager.webapp.https.address=${yarn.resourcemanager.hostname}:8090
    yarn.resourcemanager.zk-acl=world:anyone:rwcda
    yarn.resourcemanager.zk-num-retries=500
    yarn.resourcemanager.zk-retry-interval-ms=2000
    yarn.resourcemanager.zk-state-store.parent-path=/rmstore
    yarn.resourcemanager.zk-timeout-ms=10000
    yarn.scheduler.fair.assignmultiple=true
    yarn.scheduler.fair.preemption=true
    yarn.scheduler.maximum-allocation-mb=8192
    yarn.scheduler.maximum-allocation-vcores=32
    yarn.scheduler.minimum-allocation-mb=1024
    yarn.scheduler.minimum-allocation-vcores=1
    yarn.timeline-service.address=${yarn.timeline-service.hostname}:10200
    yarn.timeline-service.enabled=false
    yarn.timeline-service.generic-application-history.enabled=false
    yarn.timeline-service.generic-application-history.fs-history-store.compression-type=none
    yarn.timeline-service.generic-application-history.fs-history-store.uri=${hadoop.tmp.dir}/yarn/timeline/generic-history
    yarn.timeline-service.generic-application-history.store-class=org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore
    yarn.timeline-service.handler-thread-count=10
    yarn.timeline-service.hostname=0.0.0.0
    yarn.timeline-service.leveldb-timeline-store.path=${hadoop.tmp.dir}/yarn/timeline
    yarn.timeline-service.leveldb-timeline-store.read-cache-size=104857600
    yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size=10000
    yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size=10000
    yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms=300000
    yarn.timeline-service.store-class=org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.LeveldbTimelineStore
    yarn.timeline-service.ttl-enable=true
    yarn.timeline-service.ttl-ms=604800000
    yarn.timeline-service.webapp.address=${yarn.timeline-service.hostname}:8188
    yarn.timeline-service.webapp.https.address=${yarn.timeline-service.hostname}:8190
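
    Any key in this list can be overridden per cluster by redefining it in the matching site file on the classpath: core-site.xml for fs.*, hadoop.*, io.*, and ipc.* keys, hdfs-site.xml for dfs.* keys, mapred-site.xml for mapreduce.* keys, and yarn-site.xml for yarn.* keys. A minimal hdfs-site.xml sketch follows; the values and paths below are illustrative examples, not settings taken from the table above:

    <?xml version="1.0"?>
    <!-- hdfs-site.xml: per-cluster overrides for dfs.* keys; keys not listed here keep the values shown above -->
    <configuration>
      <property>
        <name>dfs.replication</name>
        <value>2</value>                         <!-- example: keep two copies of each block instead of three -->
      </property>
      <property>
        <name>dfs.blocksize</name>
        <value>268435456</value>                 <!-- example: 256 MB blocks instead of the 134217728 (128 MB) listed above -->
      </property>
      <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///data/hdfs/name</value>    <!-- example path; the value above expands ${hadoop.tmp.dir} -->
      </property>
    </configuration>

    Values that reference other keys, such as ${hadoop.tmp.dir} or ${fs.defaultFS}, are expanded when the configuration is loaded, so overriding the referenced key also changes every value that points at it.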
  • Original article: https://www.cnblogs.com/ygwx/p/5075827.html