  • JE Chinese Word Segmenter (极易中文分词)

    Mixed segmentation of English, digits, and Chinese (Simplified) text
    Matching of common quantity expressions and person names
    A curated dictionary of more than 220,000 words
    Forward maximum matching algorithm (see the sketch below)
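
    What "forward maximum matching" means in practice: scan the text left to right and, at each position, take the longest dictionary word that starts there, falling back to a single character when nothing matches. A minimal, self-contained sketch of that idea with a toy dictionary (illustrative only; this is not the library's actual implementation):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class ForwardMaxMatch
    {
        // Toy dictionary; the real library ships a curated list of 220,000+ words
        private static final Set<String> DICT = new HashSet<String>(
                Arrays.asList("印度尼西亚", "印度", "发生", "地震"));

        // Longest dictionary entry we bother to try
        private static final int MAX_WORD_LEN = 5;

        public static List<String> segment(String text)
        {
            List<String> tokens = new ArrayList<String>();
            int i = 0;
            while (i < text.length())
            {
                int end = Math.min(i + MAX_WORD_LEN, text.length());
                String word = null;
                // Try the longest candidate first and shrink until a dictionary word matches
                for (int j = end; j > i; j--)
                {
                    String candidate = text.substring(i, j);
                    if (DICT.contains(candidate))
                    {
                        word = candidate;
                        break;
                    }
                }
                // No dictionary word starts here: emit a single character
                if (word == null)
                {
                    word = text.substring(i, i + 1);
                }
                tokens.add(word);
                i += word.length();
            }
            return tokens;
        }

        public static void main(String[] args)
        {
            // Prints [印度尼西亚, 发生, 地震]
            System.out.println(segment("印度尼西亚发生地震"));
        }
    }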
    
    // Chinese segmentation with forward maximum matching, equivalent to a granularity of 0
    MMAnalyzer analyzer = new MMAnalyzer();

    // The parameter is the segmentation granularity: a candidate with at least this many
    // characters that forms a dictionary word is emitted as a token
    MMAnalyzer analyzer = new MMAnalyzer(2);

    // Add a new dictionary, read one word per line
    MMAnalyzer.addDictionary(reader);

    // Add a single new word
    MMAnalyzer.addWord(newWord);

    // Remove every word from the dictionary (warning: a dangerous operation; until a new
    // dictionary is loaded, all segmentation will fail)
    MMAnalyzer.clear();

    // Whether the dictionary contains the given word
    MMAnalyzer.contains(String word);

    // Remove the given word from the dictionary
    MMAnalyzer.removeWord(String word);

    // Total number of words currently in the dictionary
    MMAnalyzer.size();
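
    Putting the dictionary-maintenance calls above together, a minimal sketch (not from the original post) of how they might be used at runtime. It assumes addDictionary accepts a java.io.Reader with one word per line, as the comment above describes; the specific words are purely illustrative:

    package demo.analysis;

    import java.io.IOException;
    import java.io.StringReader;

    import jeasy.analysis.MMAnalyzer;

    public class DictionaryDemo
    {
        public static void main(String[] args) throws IOException
        {
            MMAnalyzer analyzer = new MMAnalyzer();

            // Load extra entries from any Reader, one word per line
            MMAnalyzer.addDictionary(new StringReader("日惹市\n社会事务部"));

            // Add a single word, then check that the dictionary now contains it
            MMAnalyzer.addWord("路透社");
            System.out.println(MMAnalyzer.contains("路透社"));

            // Remove it again and report how many words remain
            MMAnalyzer.removeWord("路透社");
            System.out.println(MMAnalyzer.size());

            // Segmentation always reflects the dictionary's current contents
            System.out.println(analyzer.segment("据路透社报道,日惹市发生地震。", " | "));
        }
    }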
    
    
    package demo.analysis;

    import java.io.IOException;

    import jeasy.analysis.MMAnalyzer;

    public class Segment
    {
        public static void main(String[] args)
        {
            String text = "据路透社报道,印度尼西亚社会事务部一官员星期二(29日)表示,"
                    + "日惹市附近当地时间27日晨5时53分发生的里氏6.2级地震已经造成至少5427人死亡,"
                    + "20000余人受伤,近20万人无家可归。";

            MMAnalyzer analyzer = new MMAnalyzer();
            try
            {
                System.out.println(analyzer.segment(text, " | "));
            }
            catch (IOException e)
            {
                e.printStackTrace();
            }
        }
    }
    
    Output:

    据 | 路透社 | 报道 | 印度尼西亚 | 社会 | 事务 | 部 | 官员 | 星期二 | 29日 | 表示 | 日惹 | 市 | 附近 | 当地时间 | 27日 | 晨 | 5时 | 53分 | 发生 | 里氏 | 6.2级 | 地震 | 已经 | 造成 | 至少 | 5427人 | 死亡 | 20000 | 余人 | 受伤 | 近 | 20万人 | 无家可归 |
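
    For comparison, a sketch (not in the original post) of running the same text through the coarser-grained constructor described earlier; the exact tokens it produces depend on the dictionary, so no output is reproduced here:

    package demo.analysis;

    import java.io.IOException;

    import jeasy.analysis.MMAnalyzer;

    public class CoarseSegment
    {
        public static void main(String[] args) throws IOException
        {
            String text = "据路透社报道,印度尼西亚社会事务部一官员星期二(29日)表示,"
                    + "日惹市附近当地时间27日晨5时53分发生的里氏6.2级地震已经造成至少5427人死亡,"
                    + "20000余人受伤,近20万人无家可归。";

            // Granularity 2: candidates of two or more characters that form a
            // dictionary word are emitted as single tokens
            MMAnalyzer analyzer = new MMAnalyzer(2);
            System.out.println(analyzer.segment(text, " | "));
        }
    }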
    
    
    package demo.analysis;

    import jeasy.analysis.MMAnalyzer;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.queryParser.QueryParser;
    import org.apache.lucene.search.Hits;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class Segment
    {
        public static void main(String[] args)
        {
            String fieldName = "text";
            String text = "据路透社报道,印度尼西亚社会事务部一官员星期二(29日)表示,"
                    + "日惹市附近当地时间27日晨5时53分发生的里氏6.2级地震已经造成至少5427人死亡,"
                    + "20000余人受伤,近20万人无家可归。"; // content to index

            // Chinese segmentation with forward maximum matching
            Analyzer analyzer = new MMAnalyzer();

            Directory directory = new RAMDirectory();
            //Directory directory = FSDirectory.getDirectory("/tmp/testindex", true);

            try
            {
                IndexWriter iwriter = new IndexWriter(directory, analyzer, true);
                iwriter.setMaxFieldLength(25000);
                Document doc = new Document();
                doc.add(new Field(fieldName, text, Field.Store.YES, Field.Index.TOKENIZED));
                iwriter.addDocument(doc);
                iwriter.close();

                IndexSearcher isearcher = new IndexSearcher(directory);
                QueryParser parser = new QueryParser(fieldName, analyzer);
                Query query = parser.parse("印度尼西亚 6.2级地震"); // query terms
                Hits hits = isearcher.search(query);
                System.out.println("命中:" + hits.length());

                for (int i = 0; i < hits.length(); i++)
                {
                    Document hitDoc = hits.doc(i);
                    System.out.println("内容:" + hitDoc.get(fieldName));
                }

                isearcher.close();
                directory.close();
            }
            catch (Exception e)
            {
                e.printStackTrace();
            }
        }
    }
    
    Output:

    命中:1
    内容:据路透社报道,印度尼西亚社会事务部一官员星期二(29日)表示,日惹市附近当地时间27日晨5时53分发生的里氏6.2级地震已经造成至少5427人死亡,20000余人受伤,近20万人无家可归。
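
    The commented-out FSDirectory line in the example points at a persistent, on-disk index instead of the in-memory RAMDirectory. A minimal sketch of that variant (an assumption based on the Lucene 2.x-era FSDirectory.getDirectory(String, boolean) call shown in the comment; the field content here is shortened for brevity):

    package demo.analysis;

    import jeasy.analysis.MMAnalyzer;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class PersistentIndex
    {
        public static void main(String[] args)
        {
            Analyzer analyzer = new MMAnalyzer();
            try
            {
                // Open (and recreate, because of the 'true' flag) an index directory on disk
                Directory directory = FSDirectory.getDirectory("/tmp/testindex", true);

                IndexWriter iwriter = new IndexWriter(directory, analyzer, true);
                Document doc = new Document();
                doc.add(new Field("text", "印度尼西亚日惹市发生里氏6.2级地震。",
                        Field.Store.YES, Field.Index.TOKENIZED));
                iwriter.addDocument(doc);
                iwriter.close();
                directory.close();
            }
            catch (Exception e)
            {
                e.printStackTrace();
            }
        }
    }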
    
    
    package demo.analysis;

    import jeasy.analysis.MMAnalyzer;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.TermPositionVector;
    import org.apache.lucene.queryParser.QueryParser;
    import org.apache.lucene.search.Hits;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.highlight.Highlighter;
    import org.apache.lucene.search.highlight.QueryScorer;
    import org.apache.lucene.search.highlight.TokenSources;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class Segment
    {
        public static void main(String[] args)
        {
            String fieldName = "text";
            String text = "据路透社报道,印度尼西亚社会事务部一官员星期二(29日)表示,"
                    + "日惹市附近当地时间27日晨5时53分发生的里氏6.2级地震已经造成至少5427人死亡,"
                    + "20000余人受伤,近20万人无家可归。"; // content to index

            // Chinese segmentation with forward maximum matching
            Analyzer analyzer = new MMAnalyzer();

            Directory directory = new RAMDirectory();
            //Directory directory = FSDirectory.getDirectory("/tmp/testindex", true);

            try
            {
                IndexWriter iwriter = new IndexWriter(directory, analyzer, true);
                iwriter.setMaxFieldLength(25000);
                Document doc = new Document();
                doc.add(new Field(fieldName, text, Field.Store.YES,
                        Field.Index.TOKENIZED,
                        Field.TermVector.WITH_POSITIONS_OFFSETS));
                iwriter.addDocument(doc);
                iwriter.close();

                IndexSearcher isearcher = new IndexSearcher(directory);
                QueryParser parser = new QueryParser(fieldName, analyzer);
                Query query = parser.parse("印度尼西亚 6.2级地震"); // query terms
                Hits hits = isearcher.search(query);
                System.out.println("命中:" + hits.length());

                Highlighter highlighter = new Highlighter(new QueryScorer(query));
                for (int i = 0; i < hits.length(); i++)
                {
                    text = hits.doc(i).get(fieldName);
                    TermPositionVector tpv = (TermPositionVector) IndexReader.open(
                            directory).getTermFreqVector(hits.id(i), fieldName);
                    TokenStream tokenStream = TokenSources.getTokenStream(tpv);
                    String result = highlighter.getBestFragments(tokenStream, text, 3, "...");
                    System.out.println("内容:" + result);
                }

                isearcher.close();
                directory.close();
            }
            catch (Exception e)
            {
                e.printStackTrace();
            }
        }
    }
     
    Output:

    命中:1
    内容:据路透社报道,<B>印度尼西亚</B>社会事务部一官员星期二(29日)表示,日惹市附近当地时间27日晨5时53分发生的里氏<B>6.2级</B><B>地震</B>已经造成至少5427人死亡,20000余人受伤,近20万人无家可归
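
    The <B>...</B> markers in this output come from the highlighter's default formatter. A minimal sketch (not from the original post) of swapping in different markup with SimpleHTMLFormatter, re-analyzing the stored text directly instead of reading term vectors; the tag strings are arbitrary:

    package demo.analysis;

    import jeasy.analysis.MMAnalyzer;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.queryParser.QueryParser;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.highlight.Highlighter;
    import org.apache.lucene.search.highlight.QueryScorer;
    import org.apache.lucene.search.highlight.SimpleHTMLFormatter;

    public class HighlightTags
    {
        public static void main(String[] args)
        {
            String text = "据路透社报道,印度尼西亚社会事务部一官员星期二(29日)表示,"
                    + "日惹市附近发生的里氏6.2级地震已经造成至少5427人死亡。";

            Analyzer analyzer = new MMAnalyzer();
            try
            {
                Query query = new QueryParser("text", analyzer).parse("印度尼西亚 6.2级地震");

                // Wrap matched terms in custom tags instead of the default <B>...</B>
                SimpleHTMLFormatter formatter =
                        new SimpleHTMLFormatter("<em class=\"hit\">", "</em>");
                Highlighter highlighter = new Highlighter(formatter, new QueryScorer(query));

                // Tokenize the text with the analyzer on the fly (no index needed)
                System.out.println(highlighter.getBestFragment(analyzer, "text", text));
            }
            catch (Exception e)
            {
                e.printStackTrace();
            }
        }
    }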
    
    
    
    This article originally appeared on a CSDN blog; please credit the source when reprinting: http://blog.csdn.net/xiaoping8411/archive/2010/03/30/5435134.aspx
