Using the Java API in Elasticsearch 6.0
1: Create an Elasticsearch client with the Java API
package com.search.elasticsearch;

import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

import java.net.InetAddress;

public class ElasticsearchConfig {

    private static TransportClient client;

    public TransportClient getElasticsearchClient() {
        try {
            Settings settings = Settings.builder()
                    .put("cluster.name", "my-esLearn")                  // name of the cluster to join
                    .put("client.transport.ignore_cluster_name", true)  // still connect if the cluster name does not match
                    .build();
            // create the transport client
            client = new PreBuiltTransportClient(settings)
                    .addTransportAddress(new TransportAddress(InetAddress.getByName("127.0.0.1"), 9300)); // host and transport port
            return client;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }
}
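A minimal sketch of obtaining and releasing the client; ClientDemo is a hypothetical caller and assumes a local node whose transport port 9300 is reachable:

package com.search.elasticsearch;

import org.elasticsearch.client.transport.TransportClient;

public class ClientDemo {
    public static void main(String[] args) {
        TransportClient client = new ElasticsearchConfig().getElasticsearchClient();
        if (client != null) {
            // nodes the transport client actually connected to
            System.out.println("Connected nodes: " + client.connectedNodes());
            // release sockets and thread pools when the client is no longer needed
            client.close();
        }
    }
}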
2: Use the client to create an index and assign the IK analyzer to specific fields
package com.search.elasticsearch;

import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.QueryBuilders;

import java.io.IOException;
import java.util.Date;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

public class ElasticSearchUtil {

    private static TransportClient client;

    public ElasticSearchUtil() {
        // reuse the client built by ElasticsearchConfig above
        this.client = new ElasticsearchConfig().getElasticsearchClient();
    }

    // Create the index and assign IK analyzers to certain fields;
    // later queries against those fields are analyzed with IK as well.
    public void createIndex() throws IOException {
        // build the mapping
        XContentBuilder mapping = XContentFactory.jsonBuilder()
                .startObject()
                    .startObject("properties")
                        // .startObject("m_id").field("type", "keyword").endObject()
                        // "title": field name, type text, analyzed with ik_smart (coarse-grained)
                        .startObject("title").field("type", "text").field("analyzer", "ik_smart").endObject()
                        // "content": field name, type text, analyzed with ik_max_word (fine-grained)
                        .startObject("content").field("type", "text").field("analyzer", "ik_max_word").endObject()
                    .endObject()
                .endObject();
        // "index" is the index name, "type" the type name (can be chosen freely)
        PutMappingRequest putmap = Requests.putMappingRequest("index").type("type").source(mapping);
        // create the index
        client.admin().indices().prepareCreate("index").execute().actionGet();
        // attach the mapping to the index
        client.admin().indices().putMapping(putmap).actionGet();
    }
}
At this point the index has been created. Do not skip the mapping step, otherwise the IK analyzers will not be applied to the fields.
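Whether the mapping was applied can be checked from Postman; a sketch of the request, assuming the default HTTP port 9200 on localhost:

GET http://127.0.0.1:9200/index/_mapping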
3: Add a document to the index created in the previous step, including an id; the id must be unique
public void createIndex1() throws IOException {
    // index name, type name, document id
    IndexResponse response = client.prepareIndex("index", "type", "1")
            .setSource(jsonBuilder()
                    .startObject()
                        .field("title", "title")      // field, value
                        .field("content", "content")
                    .endObject()
            ).get();
}
Query the index with Postman:
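A sketch of the Postman request, assuming the default HTTP port 9200 on localhost and the index, type and id used above:

GET http://127.0.0.1:9200/index/type/1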
4: Update the document created above; writing to the same id replaces the previously stored field values
public void updateByClient() throws IOException, ExecutionException, InterruptedException {
    // the id works like a primary key in a database table: writing to an existing id replaces its field values
    UpdateResponse response = client.update(new UpdateRequest("index", "type", "1")
            .doc(XContentFactory.jsonBuilder()
                    .startObject()
                        .field("title", "中华人民共和国国歌,国歌是最好听的歌")
                        .field("content", "中华人民共和国国歌,国歌是最好听的歌")
                    .endObject()
            )).get();
}
View the updated document with Postman (the same GET request as in step 3).
5: Query the index. Because the fields use different analyzers, the query text is first tokenized by the field's analyzer and the resulting terms are then matched against that field.
Query the content field and then the title field with Postman; the requests are sketched below.
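Sketches of the two Postman requests, assuming the default HTTP port 9200 on localhost and the index and type created above; the match query text is analyzed with the analyzer of the queried field:

POST http://127.0.0.1:9200/index/type/_search
{
  "query": { "match": { "content": "中华人民共和国国歌" } }
}

POST http://127.0.0.1:9200/index/type/_search
{
  "query": { "match": { "title": "中华人民共和国国歌" } }
}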
6: Add another document to the index
public void createIndex2() throws IOException {
    IndexResponse response = client.prepareIndex("index", "type", "2")
            .setSource(jsonBuilder()
                    .startObject()
                        .field("title", "中华民族是伟大的民族")
                        .field("content", "中华民族是伟大的民族")
                    .endObject()
            ).get();
}
Query the content field:
Result: both documents are returned, because the query text "中华人民共和国国歌" is split into fine-grained terms that include "中华", and both documents contain "中华".
Query the title field:
Query result: only one document is returned, because the title field uses the coarse-grained ik_smart analyzer, so the query is not split into the short term "中华" and the second document does not match. The sketch below shows how the two analyzers split the same text.
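The difference can be inspected through the analyze API of the same transport client. This is a minimal sketch, assuming the IK plugin is installed and the "index" created above exists; the exact terms depend on the IK dictionary, and it requires import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse:

public void compareAnalyzers() {
    // coarse-grained tokenization, as used by the title field
    AnalyzeResponse smart = client.admin().indices()
            .prepareAnalyze("index", "中华人民共和国国歌")
            .setAnalyzer("ik_smart")
            .get();
    // fine-grained tokenization, as used by the content field
    AnalyzeResponse maxWord = client.admin().indices()
            .prepareAnalyze("index", "中华人民共和国国歌")
            .setAnalyzer("ik_max_word")
            .get();
    for (AnalyzeResponse.AnalyzeToken token : smart.getTokens()) {
        System.out.println("ik_smart: " + token.getTerm());
    }
    for (AnalyzeResponse.AnalyzeToken token : maxWord.getTokens()) {
        System.out.println("ik_max_word: " + token.getTerm());
    }
}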
7: Search API operations:
public void search() {
    SearchResponse response1 = client.prepareSearch("index1", "index")          // search several indices
            .setTypes("type1", "type")                                          // restrict to these types
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setQuery(QueryBuilders.matchQuery("title", "中华人民共和国国歌"))    // query
            // .setPostFilter(QueryBuilders.rangeQuery("age").from(12).to(18))   // filter
            .setFrom(0).setSize(60).setExplain(true)
            .get();
    long totalHits1 = response1.getHits().totalHits;                             // number of hits
    System.out.println(totalHits1);

    SearchResponse response2 = client.prepareSearch("index1", "index")          // search several indices
            .setTypes("type1", "type")                                          // restrict to these types
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setQuery(QueryBuilders.matchQuery("content", "中华人民共和国国歌"))  // query
            // .setPostFilter(QueryBuilders.rangeQuery("age").from(12).to(18))   // filter
            .setFrom(0).setSize(60).setExplain(true)
            .get();
    long totalHits2 = response2.getHits().totalHits;                             // number of hits
    System.out.println(totalHits2);
}
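The method above only prints the hit counts. A small hypothetical helper like the following could print the documents behind the hits; it is a sketch and needs the additional import org.elasticsearch.search.SearchHit:

// hypothetical helper: print the id and source of every hit in a SearchResponse
private void printHits(SearchResponse response) {
    for (SearchHit hit : response.getHits().getHits()) {
        // _id of the matched document and its original JSON source
        System.out.println(hit.getId() + " -> " + hit.getSourceAsString());
    }
}

Calling printHits(response1) or printHits(response2) at the end of search() would list the matched titles and contents.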
8: Get API operations:
public void get() {
    GetResponse response = client.prepareGet("index", "type", "1").get();
    Map<String, Object> source = response.getSource();
    Set<String> keys = source.keySet();
    Iterator<String> iterator = keys.iterator();
    while (iterator.hasNext()) {
        System.out.println(source.get(iterator.next()));
    }
}
9: Bulk API: create indices and add documents in batches
/**
 * Create indices and add documents in a single bulk request.
 * @throws IOException
 */
public void bulkApi() throws IOException {
    BulkRequestBuilder bulkRequest = client.prepareBulk();
    // either use client#prepare, or use Requests# to directly build index/delete requests
    bulkRequest.add(client.prepareIndex("twitter", "tweet", "1")
            .setSource(jsonBuilder()
                    .startObject()
                        .field("user", "kimchy")
                        .field("postDate", new Date())
                        .field("message", "trying out Elasticsearch")
                    .endObject()
            )
    );
    bulkRequest.add(client.prepareIndex("twitter", "tweet", "2")
            .setSource(jsonBuilder()
                    .startObject()
                        .field("user", "kimchy")
                        .field("postDate", new Date())
                        .field("message", "another post")
                    .endObject()
            )
    );
    BulkResponse bulkResponse = bulkRequest.get();
    if (bulkResponse.hasFailures()) {
        // process failures by iterating through each bulk response item
    }
}
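The failure branch above is left empty. A minimal sketch of inspecting the individual items, using the BulkItemResponse import shown in the class above:

if (bulkResponse.hasFailures()) {
    // a BulkResponse is iterable; each item reports the outcome of one sub-request
    for (BulkItemResponse item : bulkResponse) {
        if (item.isFailed()) {
            System.out.println(item.getId() + " failed: " + item.getFailureMessage());
        }
    }
}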
10: Return the search results as JSON from a Spring MVC controller.
/**
 * Product search
 */
@RequestMapping("/productSearch")
@ResponseBody
public JSONObject productSearch(String text) {
    // client is the TransportClient from ElasticsearchConfig; JSONObject comes from the JSON library used in the project
    SearchResponse response1 = client.prepareSearch("product", "index")   // search several indices
            .setTypes("product", "type")                                  // restrict to these types
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setQuery(QueryBuilders.matchQuery("name", text))             // query
            .setFrom(0).setSize(60).setExplain(true)
            .get();
    SearchHit[] searchHits = response1.getHits().getHits();               // the matched documents
    JSONObject jsonObject = new JSONObject();
    for (int i = 0; i < searchHits.length; i++) {
        String sourceAsString = searchHits[i].getSourceAsString();
        jsonObject.put(i + "", sourceAsString);
    }
    return jsonObject;
}
Why Elasticsearch is fast: https://www.jianshu.com/p/ed7e1ebb2fb7
Official Java API documentation: https://www.elastic.co/guide/en/elasticsearch/client/java-api/6.0/java-docs-index.html