我们在D盘下建一个文件夹叫lucene,lucene内再建两个文件夹,一个叫example,一个叫index01。example文件夹下放三个txt文件:a.txt内容为hello java,b.txt内容为hello lucene,c.txt内容为hello hadoop。
package com.amazing;

import java.io.File;
import java.io.FileReader;
import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

/**
 * Builds a Lucene 3.5 index over the plain-text files in
 * {@code D:\lucene\example}, writing the index to {@code D:\lucene\index01}.
 */
public class HelloLucene {

    /** Folder containing the source .txt files to index. */
    private static final String EXAMPLE_DIR =
            "D:" + File.separator + "lucene" + File.separator + "example";

    /** Folder the index files are written to. */
    private static final String INDEX_DIR =
            "D:" + File.separator + "lucene" + File.separator + "index01";

    /**
     * Creates the index: one {@link Document} per regular file, with the file
     * body tokenized into the "content" field and the file name and absolute
     * path stored un-analyzed so searches can return them verbatim.
     * I/O errors are reported to stderr rather than propagated.
     */
    public void createIndex() {
        IndexWriter writer = null;
        try {
            Directory directory = FSDirectory.open(new File(INDEX_DIR));
            IndexWriterConfig iwc = new IndexWriterConfig(
                    Version.LUCENE_35, new StandardAnalyzer(Version.LUCENE_35));
            writer = new IndexWriter(directory, iwc);

            File exampleDir = new File(EXAMPLE_DIR);
            File[] files = exampleDir.listFiles();
            // listFiles() returns null when the path does not exist or is not
            // a directory — bail out instead of hitting an NPE in the loop.
            if (files == null) {
                System.err.println("Not a readable directory: " + exampleDir);
                return;
            }
            for (File file : files) {
                if (!file.isFile()) {
                    continue; // skip sub-directories; FileReader would fail on them
                }
                Document doc = new Document();
                // Reader-based field: tokenized but not stored; Lucene closes
                // the reader when the document has been consumed.
                doc.add(new Field("content", new FileReader(file)));
                doc.add(new Field("filename", file.getName(),
                        Field.Store.YES, Field.Index.NOT_ANALYZED));
                doc.add(new Field("path", file.getAbsolutePath(),
                        Field.Store.YES, Field.Index.NOT_ANALYZED));
                writer.addDocument(doc);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // close() commits pending documents; without it the index is
            // left locked and incomplete.
            if (writer != null) {
                try {
                    writer.close();
                } catch (CorruptIndexException e) {
                    e.printStackTrace();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
运行测试类:
package com.amazing;

import org.junit.Test;

/** JUnit driver that exercises {@link HelloLucene#createIndex()}. */
public class TestLucene {

    /** Builds the index once; passes as long as no exception escapes. */
    @Test
    public void testCreateIndex() {
        new HelloLucene().createIndex();
    }
}
文件夹index01下出现了一些文件:
时间: 2024-10-01 22:27:57