利用TermDocs也是可以得到一些有用的结果。。。代码如下
- package com.fpi.lucene.studying.myfirstlucene;
- import java.io.File;
- import java.io.IOException;
- import org.apache.lucene.LucenePackage;
- import org.apache.lucene.analysis.Analyzer;
- import org.apache.lucene.analysis.standard.StandardAnalyzer;
- import org.apache.lucene.document.Document;
- import org.apache.lucene.index.CorruptIndexException;
- import org.apache.lucene.index.IndexFileNameFilter;
- import org.apache.lucene.index.IndexReader;
- import org.apache.lucene.index.Term;
- import org.apache.lucene.index.TermDocs;
- import org.apache.lucene.queryParser.ParseException;
- import org.apache.lucene.queryParser.QueryParser;
- import org.apache.lucene.search.IndexSearcher;
- import org.apache.lucene.search.Query;
- import org.apache.lucene.search.ScoreDoc;
- import org.apache.lucene.search.TopScoreDocCollector;
- import org.apache.lucene.store.FSDirectory;
- import org.apache.lucene.util.Version;
- public class Searcher {
- // 关键字,要搜查的对象
- public static String key_word = "about";
- public static String field = "contents";
- public static void search() throws CorruptIndexException, IOException, ParseException{
- //打开索引所在地
- IndexSearcher sr = new IndexSearcher(FSDirectory.open(new File("d://test//myindex")),true);
- //词法分析器
- Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
- //解析器
- QueryParser parser = new QueryParser(Version.LUCENE_30,field,analyzer);
- //根据关键字查询
- Query query = parser.parse(key_word);
- TopScoreDocCollector collector = TopScoreDocCollector.create(100, false);
- //将所搜寻出来的结果以特定的形式放在collector中
- sr.search(query, collector);
- /**
- * topDocs():Returns the top docs that were collected by this collector.
- * 返回的是由这个collector收集的顶级文档。
- * .scoreDocs():The top hits for the query.
- * 用于查询的最高命中。
- */
- ScoreDoc[] hits = collector.topDocs().scoreDocs;
- System.out.println("搜索到符合标准的文档数目:"+hits.length);
- //检索,根据关键字在contents这个field里边找,本节关注点。
- TermDocs temDocs = sr.getIndexReader().termDocs(new Term("contents",key_word));
- while(temDocs.next()){
- System.out.println("在文件"+sr.getIndexReader().document(temDocs.doc())+"中," +
- "关键字出现了"+temDocs.freq()+"次。");
- }
- System.out.println("-----------------我是无敌的分割线----------------");
- for (int i = 0; i < hits.length; i++) {
- Document doc = sr.doc(hits[i].doc); //依此遍历符合标准的文档
- System.out.println(doc.getField("filename")+" ---- "+hits[i].toString()+" ---- ");
- }
- System.out.println("you have " + collector.getTotalHits() +
- " file matched query '" + key_word + "':");
- System.out.println("Lucene's position is in:"+LucenePackage.get());
- }
- public static void main(String[] args) {
- try {
- search();
- } catch (CorruptIndexException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- } catch (IOException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- } catch (ParseException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- }
- }
要说明的是,如果将检索到的结果放在TopScoreDocCollector中,那么它里边的顺序是按照相关度得分从大到小排序的,也就是说得分最大的排最前。而利用TermDocs:
- TermDocs temDocs = sr.getIndexReader().termDocs(new Term("contents",key_word));
- while(temDocs.next()){
- System.out.println("在文件"+sr.getIndexReader().document(temDocs.doc())+"中," +
- "关键字出现了"+temDocs.freq()+"次。");
- }
它这个“容器”里边的数据是无序的。
还要说明的是Term的构造方法有2个,一般常用的是new Term(field名称,关键字);
如果将 TermDocs temDocs = sr.getIndexReader().termDocs(new Term("contents",key_word));
错误的写成 TermDocs temDocs = sr.getIndexReader().termDocs();
也就是无参初始化,那么得到的结果就是:每个文件的freq取到的值都会是1。