cat >>/usr/lib/sysctl.d/00-system.conf <<HERE
vm.swappiness = 1
vm.max_map_count = 262144
HERE
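This fragment is only read at boot; to apply it immediately, reload the sysctl.d directories and confirm the values (sysctl --system is available on systemd-based distributions):
# Reload all sysctl.d fragments, then print the two values to confirm.
sysctl --system
sysctl vm.swappiness vm.max_map_count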
2. Adjust resource limits
https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration.html
JVM heap size
cat >>/etc/profile <<HERE
export ES_MIN_MEM=1g
export ES_MAX_MEM=1g
HERE
source /etc/profile
Or edit the environment-variable definition script /opt/elasticsearch-2.3.2/bin/elasticsearch.in.sh directly.
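ES 2.x also honors a single ES_HEAP_SIZE variable that sets both -Xms and -Xmx, which keeps the two values from drifting apart. A quick way to confirm the running JVM actually received the flags:
# Equivalent single-variable form (sets both -Xms and -Xmx):
export ES_HEAP_SIZE=1g
# Once elasticsearch is running, list the heap flags on the java process:
ps -ef | grep -o '\-Xm[sx][0-9]*[gmk]' | sort -u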
File descriptor limit and memlock
cat >>/etc/security/limits.conf <<HERE
elasticsearch soft nofile 65536
elasticsearch hard nofile 65536
elasticsearch soft memlock unlimited
elasticsearch hard memlock unlimited
HERE
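limits.conf is applied through PAM at login, so verify from a fresh login shell rather than an already-running session (the su - form below goes through PAM the same way the startup command later in this guide does):
# Expect 65536 for open files and "unlimited" for max locked memory.
su - elasticsearch -c 'ulimit -n; ulimit -l'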
3. Elasticsearch configuration (config/elasticsearch.yml)
On ela-client.example.com (192.168.8.10), a coordinating-only (client) node:
mv /opt/elasticsearch-2.3.2/config/elasticsearch.yml{,.default}
cat >/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: false
node.data: false
path.data: /opt/elasticsearch-2.3.2/data
path.logs: /opt/elasticsearch-2.3.2/logs
bootstrap.mlockall: true
network.host: 127.0.0.1,192.168.8.10
http.port: 9200
script.inline: true
script.indexed: true
HERE
On ela-master1.example.com (192.168.8.101), a dedicated master node:
mv /opt/elasticsearch-2.3.2/config/elasticsearch.yml{,.default}
cat >/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: true
node.data: false
path.data: /opt/elasticsearch-2.3.2/data
path.logs: /opt/elasticsearch-2.3.2/logs
bootstrap.mlockall: true
network.host: 127.0.0.1,192.168.8.101
http.port: 9200
script.inline: true
script.indexed: true
HERE
On ela-master2.example.com (192.168.8.102), a dedicated master node:
mv /opt/elasticsearch-2.3.2/config/elasticsearch.yml{,.default}
cat >/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: true
node.data: false
path.data: /opt/elasticsearch-2.3.2/data
path.logs: /opt/elasticsearch-2.3.2/logs
bootstrap.mlockall: true
network.host: 127.0.0.1,192.168.8.102
http.port: 9200
script.inline: true
script.indexed: true
HERE
On ela-master3.example.com (192.168.8.103), a dedicated master node:
mv /opt/elasticsearch-2.3.2/config/elasticsearch.yml{,.default}
cat >/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: true
node.data: false
path.data: /opt/elasticsearch-2.3.2/data
path.logs: /opt/elasticsearch-2.3.2/logs
bootstrap.mlockall: true
network.host: 127.0.0.1,192.168.8.103
http.port: 9200
script.inline: true
script.indexed: true
HERE
On ela-data1.example.com (192.168.8.201), a dedicated data node:
mv /opt/elasticsearch-2.3.2/config/elasticsearch.yml{,.default}
cat >/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: false
node.data: true
path.data: /opt/elasticsearch-2.3.2/data
path.logs: /opt/elasticsearch-2.3.2/logs
bootstrap.mlockall: true
network.host: 127.0.0.1,192.168.8.201
http.port: 9200
script.inline: true
script.indexed: true
HERE
On ela-data2.example.com (192.168.8.202), a dedicated data node:
mv /opt/elasticsearch-2.3.2/config/elasticsearch.yml{,.default}
cat >/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
node.master: false
node.data: true
path.data: /opt/elasticsearch-2.3.2/data
path.logs: /opt/elasticsearch-2.3.2/logs
bootstrap.mlockall: true
network.host: 127.0.0.1,192.168.8.202
http.port: 9200
script.inline: true
script.indexed: true
HERE
4. Start elasticsearch
su - elasticsearch -c "/opt/elasticsearch-2.3.2/bin/elasticsearch --cluster.name elasticsearch_cluster --node.name elastic1.example.com --path.data /opt/elasticsearch-2.3.2/data --path.logs /opt/elasticsearch-2.3.2/logs --bootstrap.mlockall true"
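For unattended use, the same binary can run detached; -d daemonizes and -p writes a pidfile (both are standard flags in ES 2.x):
# Daemonized start with a pidfile; stop later with: kill $(cat /tmp/elasticsearch.pid)
su - elasticsearch -c "/opt/elasticsearch-2.3.2/bin/elasticsearch -d -p /tmp/elasticsearch.pid"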
In cluster mode, nodes listen on transport port 9300 by default.
[root@ela-master1 ~]# netstat -tunlp|grep java
(output truncated: java listening on tcp 9200 and 9300)
[root@ela-master1 ~]# curl 'http://localhost:9200/_nodes/process?pretty'
{
}
[root@ela-master1 ~]# curl 'http://localhost:9200/_nodes/stats/process?pretty'
{
}
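Because bootstrap.mlockall silently falls back to false when the memlock limit is too low, it is worth filtering that _nodes output for the flag:
# Every node should report "mlockall" : true; false usually means the
# memlock settings in /etc/security/limits.conf did not take effect.
curl -s 'http://localhost:9200/_nodes/process?pretty' | grep mlockall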
Tip: if the process gets OOM-killed after you raise the JVM heap, check the host's memory size. I tested on a host with 512MB of RAM, set both the minimum and maximum heap to 1GB, and it was OOM-killed right at startup; after adding memory it ran fine. For other errors, check the log /opt/elasticsearch-2.3.2/logs/elasticsearch_cluster.log
Start the remaining nodes the same way (omitted).
5. Create the cluster (unicast mode)
Note: multicast discovery requires installing a plugin:
/opt/elasticsearch-2.3.2/bin/plugin install discovery-multicast
Nodes join the cluster through the discovery module. Append the following lines to /opt/elasticsearch-2.3.2/config/elasticsearch.yml on each of the nodes above, then restart:
cat >>/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
discovery.zen.ping.timeout: 100s
discovery.zen.fd.ping_timeout: 100s
discovery.zen.ping.multicast.enabled: false
discovery.zen.ping.unicast.hosts: ["192.168.8.10:9300","192.168.8.101:9300","192.168.8.102:9300","192.168.8.103:9300","192.168.8.201:9300","192.168.8.202:9300"]
discovery.zen.minimum_master_nodes: 2
gateway.recover_after_nodes: 2
HERE
This setting should be set to a quorum of master-eligible nodes: (master_eligible_nodes / 2) + 1. In other words, if there are three master-eligible nodes, then minimum master nodes should be set to (3 / 2) + 1, i.e. 2.

This setting can also be changed dynamically on a live cluster with the cluster update settings API:

PUT _cluster/settings
{
    "transient": {
        "discovery.zen.minimum_master_nodes": 2
    }
}

An advantage of splitting the master and data roles between dedicated nodes is that you can have just three master-eligible nodes and set minimum_master_nodes to 2. You never have to change this setting, no matter how many dedicated data nodes you add to the cluster.
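The same call issued with curl (ES 2.x accepts the body without an explicit Content-Type header):
curl -XPUT 'http://localhost:9200/_cluster/settings' -d '
{
  "transient": { "discovery.zen.minimum_master_nodes": 2 }
}'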
Once all nodes have started, the cluster state can be queried from any node:
[root@ela-master3 ~]# curl 'http://localhost:9200/_nodes/process?pretty'
{
}
The log shows similar information:
tail -f /opt/elasticsearch-2.3.2/logs/elasticsearch_cluster.log
detected_master {ela-master2.example.com}{RImoGoyDQue5PxKjjuGXJA}{192.168.8.102}{192.168.8.102:9300}{data=false, master=true},
added {{ela-master2.example.com}{RImoGoyDQue5PxKjjuGXJA}{192.168.8.102}{192.168.8.102:9300}{data=false, master=true},
{ela-master3.example.com}{h_jajHU_Rea7EUjKtQUnDA}{192.168.8.103}{192.168.8.103:9300}{data=false, master=true},},
reason: zen-disco-receive(from master [{ela-master2.example.com}{RImoGoyDQue5PxKjjuGXJA}{192.168.8.102}{192.168.8.102:9300}{data=false, master=true}])
[2016-05-07 17:08:57,679][INFO ][cluster.service]
added {{ela-client.example.com}{PMmbEGuoTyCR54FbGt5bpg}{192.168.8.10}{192.168.8.10:9300}{data=false, master=false},},
reason: zen-disco-receive(from master [{ela-master2.example.com}{RImoGoyDQue5PxKjjuGXJA}{192.168.8.102}{192.168.8.102:9300}{data=false, master=true}])
[2016-05-07 17:10:14,944][INFO ][cluster.service]
added {{ela-data1.example.com}{dAp0lnpVTPavfTuiabM-hg}{192.168.8.201}{192.168.8.201:9300}{master=false},},
reason: zen-disco-receive(from master [{ela-master2.example.com}{RImoGoyDQue5PxKjjuGXJA}{192.168.8.102}{192.168.8.102:9300}{data=false, master=true}])
[2016-05-07 17:11:14,109][INFO ][cluster.service]
added {{ela-data2.example.com}{TMN4Rr72TTaWpPdve8H5dQ}{192.168.8.202}{192.168.8.202:9300}{master=false},},
reason: zen-disco-receive(from master [{ela-master2.example.com}{RImoGoyDQue5PxKjjuGXJA}{192.168.8.102}{192.168.8.102:9300}{data=false, master=true}])
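To confirm each node joined with its intended role, the _cat API prints one line per node; in ES 2.x the master column marks the elected master with * and node.role distinguishes data (d) from client (c) nodes:
# One line per node: host, heap/ram usage, node.role, master marker, name.
curl 'http://localhost:9200/_cat/nodes?v'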
[root@ela-client ~]# curl 'localhost:9200/_cat/indices?v'
health status index ...
(output truncated: six index rows, all with health green)
[root@ela-client ~]# curl 'http://localhost:9200/_cat/health?v'
epoch      timestamp
1462623617 20:20:17   (remaining columns truncated)
The following is a simpler three-node variant (elastic1/2/3; every node is both master-eligible and a data node).
On elastic1.example.com (192.168.8.101):
mv /opt/elasticsearch-2.3.2/config/elasticsearch.yml{,.default}
cat >/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
path.data: /opt/elasticsearch-2.3.2/data
path.logs: /opt/elasticsearch-2.3.2/logs
bootstrap.mlockall: true
network.host: 127.0.0.1,192.168.8.101
http.port: 9200
script.inline: true
script.indexed: true
HERE
On elastic2.example.com (192.168.8.102):
mv /opt/elasticsearch-2.3.2/config/elasticsearch.yml{,.default}
cat >/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
path.data: /opt/elasticsearch-2.3.2/data
path.logs: /opt/elasticsearch-2.3.2/logs
bootstrap.mlockall: true
network.host: 127.0.0.1,192.168.8.102
http.port: 9200
script.inline: true
script.indexed: true
HERE
On elastic3.example.com (192.168.8.103):
mv /opt/elasticsearch-2.3.2/config/elasticsearch.yml{,.default}
cat >/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
cluster.name: elasticsearch_cluster
node.name: ${HOSTNAME}
path.data: /opt/elasticsearch-2.3.2/data
path.logs: /opt/elasticsearch-2.3.2/logs
bootstrap.mlockall: true
network.host: 127.0.0.1,192.168.8.103
http.port: 9200
script.inline: true
script.indexed: true
HERE
[root@elastic1 ~]# curl 'http://192.168.8.101:9200/_cat/health?v'
epoch      timestamp
1462205047 00:04:07   (remaining columns truncated)
[root@elastic1 ~]# curl 'http://192.168.8.102:9200/_cat/health?v'
epoch      timestamp
1462205053 00:04:13   (remaining columns truncated)
[root@elastic1 ~]# curl 'http://192.168.8.103:9200/_cat/health?v'
epoch      timestamp
1462205057 00:04:17   (remaining columns truncated)
Append the following lines to /opt/elasticsearch-2.3.2/config/elasticsearch.yml on each of the nodes above, then restart:
cat >>/opt/elasticsearch-2.3.2/config/elasticsearch.yml <<HERE
discovery.zen.ping.timeout: 100s
discovery.zen.fd.ping_timeout: 100s
discovery.zen.ping.multicast.enabled: false
discovery.zen.ping.unicast.hosts: ["192.168.8.101:9300","192.168.8.102:9300","192.168.8.103:9300"]
discovery.zen.minimum_master_nodes: 2
gateway.recover_after_nodes: 2
HERE
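After restarting each node, _cluster/health can block until the cluster reaches a target status instead of polling by hand (the 120s timeout is arbitrary):
# Returns the health JSON once status is green, or after 120s with
# "timed_out": true if the cluster never got there.
curl -s 'http://localhost:9200/_cluster/health?wait_for_status=green&timeout=120s&pretty'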
A snippet from the log on 192.168.8.103 after restarting; 192.168.8.101 has been elected master:
[2016-05-03 00:29:47,837][INFO ][env      ] ...
[2016-05-03 00:29:49,165][INFO ][node     ] ...
[2016-05-03 00:29:49,165][INFO ][node     ] ...
[2016-05-03 00:29:49,231][INFO ][transport] ...
[2016-05-03 00:29:49,236][INFO ][discovery] ...
[2016-05-03 00:29:52,357][INFO ][cluster.service] [elastic3.example.com]
detected_master {elastic1.example.com}{VbXED9YbQ7qQGcSZ0TlKPQ}{192.168.8.101}{192.168.8.101:9300},
added {{elastic2.example.com}{v_yQRvrYT9GHumJ7kIzpUQ}{192.168.8.102}{192.168.8.102:9300},{elastic1.example.com}{VbXED9YbQ7qQGcSZ0TlKPQ}{192.168.8.101}{192.168.8.101:9300},},
reason: zen-disco-receive(from master [{elastic1.example.com}{VbXED9YbQ7qQGcSZ0TlKPQ}{192.168.8.101}{192.168.8.101:9300}])
[2016-05-03 00:29:52,401][INFO ][http     ] ...
[2016-05-03 00:29:52,401][INFO ][node     ] ...
https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
[root@elastic3 ~]# curl 'http://192.168.8.101:9200/_cat/health?v'
epoch      timestamp
1462207733 00:48:53   (remaining columns truncated)
[root@elastic3 ~]# curl 'http://192.168.8.102:9200/_cat/health?v'
epoch      timestamp
1462207744 00:49:04   (remaining columns truncated)
[root@elastic3 ~]# curl 'http://192.168.8.103:9200/_cat/health?v'
epoch      timestamp
1462207751 00:49:11   (remaining columns truncated)
[root@elastic3 ~]# curl -XGET 'http://192.168.8.101:9200/_cluster/health/192.168.8.*'
{"cluster_name":"elasticsearch_cluster","status":"green","timed_out":false,"number_of_nodes":3,
"number_of_data_nodes":3,"active_primary_shards":0,"active_shards":0,"relocating_shards":0,
"initializing_shards":0,"unassigned_shards":0,"delayed_unassigned_shards":0,"number_of_pending_tasks":0,
"number_of_in_flight_fetch":0,"task_max_waiting_in_queue_millis":0,"active_shards_percent_as_number":100.0}