import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.analysis.SimpleAnalyzer;
import junit.framework.TestCase;
import java.io.IOException;
import java.util.Collection;
import java.util.ArrayList;
import java.util.Iterator;
/**
 * Benchmarks indexing the same document set into a {@link RAMDirectory}
 * (in-memory) versus an {@link FSDirectory} (on-disk) and asserts that the
 * in-memory index is built faster. Based on the classic "Lucene in Action"
 * performance demo (legacy Lucene 1.9/2.x API, JUnit 3).
 */
public class FSversusRAMDirectoryTest extends TestCase
{
    /** Index stored on the local file system. */
    private Directory fsDir;
    /** Index held entirely in memory. */
    private Directory ramDir;
    /** Test corpus: 3000 documents of 5 words each, built once and indexed into both directories. */
    private Collection docs = loadDocuments(3000, 5);

    /**
     * Creates a fresh in-memory directory and a file-system directory under
     * the system temp dir before each test.
     *
     * @throws Exception if the file-system directory cannot be opened
     */
    protected void setUp() throws Exception
    {
        String fsIndexDir = System.getProperty("java.io.tmpdir", "tmp")
                + System.getProperty("file.separator") + "fs-index";
        ramDir = new RAMDirectory(); // in-memory directory
        // second argument 'true' erases any existing index at that path
        fsDir = FSDirectory.getDirectory(fsIndexDir, true);
    }

    /**
     * Closes both directories so file handles and memory are released
     * between test runs (the original version leaked them).
     *
     * @throws Exception if closing a directory fails
     */
    protected void tearDown() throws Exception
    {
        if (ramDir != null)
        {
            ramDir.close();
        }
        if (fsDir != null)
        {
            fsDir.close();
        }
    }

    /**
     * Indexes the corpus into RAM and onto disk, prints both timings, then
     * asserts disk indexing was slower. The timings are printed BEFORE the
     * assertion so the numbers are visible even when the check fails.
     *
     * @throws IOException if indexing fails
     */
    public void testTiming() throws IOException
    {
        long ramTiming = timeIndexWriter(ramDir);
        long fsTiming = timeIndexWriter(fsDir);
        System.out.println("RAMDirectory Time: " + ramTiming + " ms");
        System.out.println("FSDirectory Time : " + fsTiming + " ms");
        // NOTE(review): wall-clock comparisons are inherently flaky (OS page
        // cache, JIT warm-up) — this assertion preserves the demo's intent
        // but can fail spuriously on fast SSDs.
        assertTrue(fsTiming > ramTiming);
    }

    /**
     * Measures the wall-clock time taken to index the corpus into the
     * given directory.
     *
     * @param dir target index directory
     * @return elapsed time in milliseconds
     * @throws IOException if indexing fails
     */
    private long timeIndexWriter(Directory dir) throws IOException
    {
        long start = System.currentTimeMillis();
        addDocuments(dir);
        long stop = System.currentTimeMillis();
        return stop - start;
    }

    /**
     * Writes every document in {@link #docs} into {@code dir}, with four
     * fields per document exercising the keyword / unindexed / unstored /
     * text field combinations, then optimizes and closes the writer.
     *
     * @param dir target index directory
     * @throws IOException if the writer fails
     */
    private void addDocuments(Directory dir) throws IOException
    {
        IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true);
        /*
         * Tuning knobs left here from the original demo: adjust
         * writer.mergeFactor / writer.maxMergeDocs / writer.minMergeDocs
         * to explore FSDirectory indexing performance.
         */
        for (Iterator iter = docs.iterator(); iter.hasNext();)
        {
            Document doc = new Document();
            String word = (String) iter.next();
            doc.add(new Field("keyword", word, Field.Store.YES, Field.Index.UN_TOKENIZED));
            doc.add(new Field("unindexed", word, Field.Store.YES, Field.Index.NO));
            doc.add(new Field("unstored", word, Field.Store.NO, Field.Index.TOKENIZED));
            doc.add(new Field("text", word, Field.Store.YES, Field.Index.TOKENIZED));
            writer.addDocument(doc);
        }
        writer.optimize();
        writer.close();
    }

    /**
     * Builds a collection of {@code numDocs} identical document strings,
     * each consisting of {@code wordsPerDoc} repetitions of "Bibamus ".
     *
     * @param numDocs     number of document strings to create
     * @param wordsPerDoc words per document
     * @return collection of document strings
     */
    private Collection loadDocuments(int numDocs, int wordsPerDoc)
    {
        final String word = "Bibamus ";
        Collection docs = new ArrayList(numDocs);
        for (int i = 0; i < numDocs; i++)
        {
            // presize to the actual character count, not the word count
            StringBuffer doc = new StringBuffer(wordsPerDoc * word.length());
            for (int j = 0; j < wordsPerDoc; j++)
            {
                doc.append(word);
            }
            docs.add(doc.toString());
        }
        return docs;
    }
}
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.analysis.SimpleAnalyzer;
import junit.framework.TestCase;
import java.io.IOException;
import java.util.Collection;
import java.util.ArrayList;
import java.util.Iterator;
/**
 * NOTE(review): this class is a byte-for-byte duplicate of the
 * FSversusRAMDirectoryTest defined earlier in this file — almost certainly
 * an accidental paste. Java forbids two identical top-level classes in one
 * compilation unit (and the import block above it is also illegally placed
 * mid-file and lacks org.apache.lucene.store.Directory), so this copy
 * should be removed once confirmed redundant.
 *
 * Benchmarks indexing the same document set into an in-memory RAMDirectory
 * versus an on-disk FSDirectory and asserts the in-memory build is faster
 * (legacy Lucene 1.9/2.x API, JUnit 3).
 */
public class FSversusRAMDirectoryTest extends TestCase
{
// Index stored on the local file system.
private Directory fsDir;
// Index held entirely in memory.
private Directory ramDir;
// Test corpus: 3000 documents of 5 words each, indexed into both directories.
private Collection docs = loadDocuments(3000, 5);// load the test data
/**
 * Creates a fresh in-memory directory and a file-system directory under
 * the system temp dir before each test. The 'true' flag to getDirectory
 * erases any existing index at that path.
 */
protected void setUp() throws Exception
{
String fsIndexDir = System.getProperty("java.io.tmpdir", "tmp") + System.getProperty("file.separator") + "fs-index";
ramDir = new RAMDirectory();// in-memory directory
fsDir = FSDirectory.getDirectory(fsIndexDir, true);
}
/**
 * Indexes the corpus into RAM and onto disk, asserts disk was slower,
 * then prints both timings.
 * NOTE(review): the assertion runs before the printouts, so the timing
 * numbers are lost exactly when the (inherently flaky) wall-clock
 * comparison fails.
 */
public void testTiming() throws IOException
{
long ramTiming = timeIndexWriter(ramDir);
long fsTiming = timeIndexWriter(fsDir);
assertTrue(fsTiming > ramTiming);
System.out.println("RAMDirectory Time: " + (ramTiming) + " ms");
System.out.println("FSDirectory Time : " + (fsTiming) + " ms");
}
/**
 * Measures the wall-clock time (ms) taken to index the corpus into the
 * given directory.
 */
private long timeIndexWriter(Directory dir) throws IOException
{
long start = System.currentTimeMillis();
addDocuments(dir);
long stop = System.currentTimeMillis();
return (stop - start);
}
/**
 * Writes every document in {@link #docs} into {@code dir} with four
 * fields exercising the keyword / unindexed / unstored / text
 * combinations, then optimizes and closes the writer.
 */
private void addDocuments(Directory dir) throws IOException
{
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(),true);
/**
// change to adjust performance of indexing with FSDirectory
writer.mergeFactor = writer.mergeFactor;
writer.maxMergeDocs = writer.maxMergeDocs;
writer.minMergeDocs = writer.minMergeDocs;
*/
for (Iterator iter = docs.iterator(); iter.hasNext();)
{
Document doc = new Document();
String word = (String) iter.next();
doc.add(new Field("keyword",word,Field.Store.YES,Field.Index.UN_TOKENIZED));
doc.add(new Field("unindexed",word,Field.Store.YES,Field.Index.NO));
doc.add(new Field("unstored",word,Field.Store.NO,Field.Index.TOKENIZED));
doc.add(new Field("text",word,Field.Store.YES,Field.Index.TOKENIZED));
writer.addDocument(doc);
}
writer.optimize();
writer.close();
}
/**
 * Builds a collection of {@code numDocs} identical strings, each made of
 * {@code wordsPerDoc} repetitions of "Bibamus ".
 * NOTE(review): StringBuffer is presized to wordsPerDoc CHARS but each
 * appended word is 8 chars, so the buffer always grows.
 */
private Collection loadDocuments(int numDocs, int wordsPerDoc)
{
Collection docs = new ArrayList(numDocs);
for (int i = 0; i < numDocs; i++)
{
StringBuffer doc = new StringBuffer(wordsPerDoc);
for (int j = 0; j < wordsPerDoc; j++)
{
doc.append("Bibamus ");
}
docs.add(doc.toString());
}
return docs;
}
}