Lucene 3.0.0 Field usage and its Store / Index options
package com.txt.test2;
import java.io.File;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.Version;
import org.junit.Test;
// Field Store and Index options
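// Store controls whether the original field value is kept in the index:
//   Store.YES - the value is stored and can be read back later with Document.get()
//   Store.NO  - the value is not stored (it can still be searched if it is indexed, but get() returns null)
// Index controls whether and how the field value is indexed:
//   Index.ANALYZED     - the value is run through the Analyzer and the resulting terms are indexed
//   Index.NOT_ANALYZED - the whole value is indexed as a single term, without analysis
//   Index.NO           - the value is not indexed and cannot be searched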
public class FieldTest {
private String txt1 = "中华人 为人民服务";
private String txt2 = "中华人 为人民服务 学习雷锋好榜样";
// Analyzer (tokenizer)
private Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
// Index directory
private File file = new File("f:"+File.separator+"indexDir9");
// Create the index
@Test
public void create () throws Exception{
Directory directory = new SimpleFSDirectory(file);
// Open an IndexWriter on the directory: creates the index if it does not exist, otherwise appends to it
IndexWriter writer = new IndexWriter(directory, analyzer, MaxFieldLength.LIMITED);
Document document = new Document();
document.add(new Field("content", txt1, Store.YES, Index.ANALYZED));
writer.addDocument(document);
document = new Document();
// Index.NOT_ANALYZED indexes the whole value as a single term (no tokenizing); Index.NO does not index the field at all
document.add(new Field("id","1",Store.YES,Index.ANALYZED));
document.add(new Field("title","txt2",Store.YES,Index.ANALYZED));
document.add(new Field("content",txt2,Store.YES,Index.NO));
writer.addDocument(document);
document = new Document();
document.add(new Field("content","哈哈哈",Store.YES,Index.NOT_ANALYZED));
writer.addDocument(document);
writer.close();
}
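// A minimal sketch (not part of the original post): the three-argument IndexWriter constructor used in
// create() appends to an existing index, while passing create=true rebuilds the index from scratch,
// discarding whatever is already in the directory. The method name recreate is only an illustrative choice.
@Test
public void recreate() throws Exception{
Directory directory = new SimpleFSDirectory(file);
// create=true: overwrite any existing index in this directory
IndexWriter writer = new IndexWriter(directory, analyzer, true, MaxFieldLength.LIMITED);
Document document = new Document();
document.add(new Field("content", txt1, Store.YES, Index.ANALYZED));
// Store.NO: this field can still be searched, but document.get("title") would return null
document.add(new Field("title", txt2, Store.NO, Index.ANALYZED));
writer.addDocument(document);
writer.close();
}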
// Search
@Test
public void search() throws Exception{
Directory directory = new SimpleFSDirectory(file);
IndexSearcher searcher = new IndexSearcher(directory,true);
QueryParser parser = new QueryParser(Version.LUCENE_30, "content", analyzer);
Query query = parser.parse("哈哈哈");
TopDocs tdDocs = searcher.search(query, 100);
System.out.println("命中了多少次:"+tdDocs.totalHits);
if (tdDocs.scoreDocs != null) {
for (int i = 0; i < tdDocs.scoreDocs.length; i++) {
ScoreDoc sDoc = tdDocs.scoreDocs[i];
Document document = searcher.doc(sDoc.doc);
System.out.println("content: "+document.get("content"));
}
}else {
System.out.println("Nothing found...");
}
searcher.close();
}
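// A minimal sketch (not part of the original post): "哈哈哈" was indexed with Index.NOT_ANALYZED, so it
// sits in the index as one single term. A TermQuery matches that term exactly, whereas the QueryParser
// query in search() first analyzes "哈哈哈" with StandardAnalyzer and therefore may not find it.
// Fully qualified names are used here to avoid adding imports; the method name termSearch is illustrative.
@Test
public void termSearch() throws Exception{
Directory directory = new SimpleFSDirectory(file);
IndexSearcher searcher = new IndexSearcher(directory, true);
Query query = new org.apache.lucene.search.TermQuery(new org.apache.lucene.index.Term("content", "哈哈哈"));
TopDocs tdDocs = searcher.search(query, 100);
System.out.println("Total hits: " + tdDocs.totalHits);
searcher.close();
}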
// Read raw documents from the index
@Test
public void reader() throws Exception{
Directory directory = new SimpleFSDirectory(file);
// Open an IndexReader directly on the index directory
IndexReader reader = IndexReader.open(directory);
// maxDoc() returns the number of documents in the index (including deleted ones)
int max = reader.maxDoc();
for (int i = 0; i < max; i++) {
Document document = reader.document(i);
System.out.println(document.get("content"));
}
reader.close();
}
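// A minimal sketch (not part of the original post): maxDoc() also counts deleted documents, so when the
// index may contain deletions it is safer to skip them with isDeleted(i) before loading stored fields.
// The method name readerSkipDeleted is only an illustrative choice.
@Test
public void readerSkipDeleted() throws Exception{
Directory directory = new SimpleFSDirectory(file);
IndexReader reader = IndexReader.open(directory);
for (int i = 0; i < reader.maxDoc(); i++) {
if (reader.isDeleted(i)) {
continue;
}
Document document = reader.document(i);
System.out.println(document.get("content"));
}
reader.close();
}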
}