diff --git a/core/src/main/scala/lightdb/index/IndexManager.scala b/core/src/main/scala/lightdb/index/IndexManager.scala
new file mode 100644
index 00000000..5ce86d12
--- /dev/null
+++ b/core/src/main/scala/lightdb/index/IndexManager.scala
@@ -0,0 +1,16 @@
+package lightdb.index
+
+import cats.effect.IO
+import lightdb.Document
+
+trait IndexManager[D <: Document[D]] {
+  protected var _fields = List.empty[IndexedField[_, D]]
+
+  def fields: List[IndexedField[_, D]] = _fields
+
+  def count(): IO[Int]
+
+  protected[lightdb] def register[F](field: IndexedField[F, D]): Unit = synchronized {
+    _fields = field :: _fields
+  }
+}
diff --git a/core/src/main/scala/lightdb/index/IndexSupport.scala b/core/src/main/scala/lightdb/index/IndexSupport.scala
index 9ce90a8d..20f2f309 100644
--- a/core/src/main/scala/lightdb/index/IndexSupport.scala
+++ b/core/src/main/scala/lightdb/index/IndexSupport.scala
@@ -1,25 +1,20 @@
 package lightdb.index
 
 import cats.effect.IO
-import lightdb.query.Query
+import lightdb.query.{PagedResults, Query, SearchContext}
 import lightdb.{Collection, Document}
 
-trait IndexSupport[D <: Document[D], IF[F] <: IndexedField[F, D]] extends Collection[D] {
+trait IndexSupport[D <: Document[D]] extends Collection[D] {
   lazy val query: Query[D] = Query(this)
 
   protected def indexer: Indexer[D]
 
   override def commit(): IO[Unit] = super.commit().flatMap(_ => indexer.commit())
 
-  def index: IndexManager[D, IF]
-}
+  def index: IndexManager[D]
 
-trait IndexManager[D <: Document[D], IF <: IndexedField[_, D]] {
-  protected var _fields = List.empty[IF]
-
-  def fields: List[IF] = _fields
-
-  protected[lightdb] def register[F, Field <: IF with IndexedField[F, D]](field: Field): Unit = synchronized {
-    _fields = field :: _fields
-  }
+  def doSearch(query: Query[D],
+               context: SearchContext[D],
+               offset: Int,
+               after: Option[PagedResults[D]]): IO[PagedResults[D]]
 }
\ No newline at end of file
diff --git a/core/src/main/scala/lightdb/index/Indexer.scala b/core/src/main/scala/lightdb/index/Indexer.scala
index d3f6e066..dff84f13 100644
--- a/core/src/main/scala/lightdb/index/Indexer.scala
+++ b/core/src/main/scala/lightdb/index/Indexer.scala
@@ -1,14 +1,17 @@
 package lightdb.index
 
 import cats.effect.IO
-import lightdb.{Collection, Document, Id}
+import lightdb.query.SearchContext
+import lightdb.{Collection, Document, Id, query}
 
 trait Indexer[D <: Document[D]] {
-  def collection: Collection[D]
+  def indexSupport: IndexSupport[D]
 
   private[lightdb] def delete(id: Id[D]): IO[Unit]
 
   def commit(): IO[Unit]
 
   def count(): IO[Int]
+
+  def withSearchContext[Return](f: SearchContext[D] => IO[Return]): IO[Return]
 }
diff --git a/core/src/main/scala/lightdb/query/IndexContext.scala b/core/src/main/scala/lightdb/query/IndexContext.scala
new file mode 100644
index 00000000..edb1ac88
--- /dev/null
+++ b/core/src/main/scala/lightdb/query/IndexContext.scala
@@ -0,0 +1,8 @@
+package lightdb.query
+
+import cats.effect.IO
+import lightdb.Document
+
+trait IndexContext[D <: Document[D]] {
+  def nextPage(currentPage: PagedResults[D]): IO[Option[PagedResults[D]]]
+}
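Pulling `IndexManager` and `IndexContext` up into core is what lets pagination stay storage-agnostic: callers only see the `nextPage` contract, while each backend captures its own cursor state (the Lucene implementation later in this diff keeps the last `ScoreDoc`). A minimal sketch of consuming that contract, independent of any backend:

```scala
import cats.effect.IO
import lightdb.{Document, Id}
import lightdb.query.PagedResults

// Drains every page into a single id list; nothing here references Lucene types.
def allIds[D <: Document[D]](page: PagedResults[D]): IO[List[Id[D]]] =
  page.next().flatMap {
    case Some(nextPage) => allIds(nextPage).map(page.ids ::: _)
    case None           => IO.pure(page.ids)
  }
```
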
diff --git a/core/src/main/scala/lightdb/query/PagedResults.scala b/core/src/main/scala/lightdb/query/PagedResults.scala
index 98b4dae1..cd848a5b 100644
--- a/core/src/main/scala/lightdb/query/PagedResults.scala
+++ b/core/src/main/scala/lightdb/query/PagedResults.scala
@@ -4,10 +4,6 @@ import cats.effect.IO
 import cats.implicits.toTraverseOps
 import lightdb.{Document, Id}
 
-trait IndexContext[D <: Document[D]] {
-  def nextPage(): IO[Option[PagedResults[D]]]
-}
-
 case class PagedResults[D <: Document[D]](query: Query[D],
                                           context: IndexContext[D],
                                           offset: Int,
@@ -17,11 +13,11 @@ case class PagedResults[D <: Document[D]](query: Query[D],
   lazy val pages: Int = math.ceil(total.toDouble / query.pageSize.toDouble).toInt
 
   def stream: fs2.Stream[IO, D] = fs2.Stream(ids: _*)
-    .evalMap(id => query.collection(id))
+    .evalMap(id => query.indexSupport(id))
 
-  def docs: IO[List[D]] = ids.map(id => query.collection(id)).sequence
+  def docs: IO[List[D]] = ids.map(id => query.indexSupport(id)).sequence
 
   def hasNext: Boolean = pages > (page + 1)
 
-  def next(): IO[Option[PagedResults[D]]] = context.nextPage()
+  def next(): IO[Option[PagedResults[D]]] = context.nextPage(this)
 }
diff --git a/core/src/main/scala/lightdb/query/Query.scala b/core/src/main/scala/lightdb/query/Query.scala
index a5c794fa..fa34c1d9 100644
--- a/core/src/main/scala/lightdb/query/Query.scala
+++ b/core/src/main/scala/lightdb/query/Query.scala
@@ -2,11 +2,10 @@ package lightdb.query
 
 import cats.effect.IO
 import lightdb.index.IndexSupport
-import lightdb.{Collection, Document, Id}
-import org.apache.lucene.index.StoredFields
-import org.apache.lucene.search.{MatchAllDocsQuery, ScoreDoc, SortField, TopFieldDocs, Query => LuceneQuery, Sort => LuceneSort}
+import lightdb.Document
 
-case class Query[D <: Document[D]](filter: Option[Filter[D]] = None,
+case class Query[D <: Document[D]](indexSupport: IndexSupport[D],
+                                   filter: Option[Filter[D]] = None,
                                    sort: List[Sort] = Nil,
                                    scoreDocs: Boolean = false,
                                    pageSize: Int = 1_000) {
@@ -16,7 +15,8 @@ case class Query[D <: Document[D]](filter: Option[Filter[D]] = None,
   def scoreDocs(b: Boolean): Query[D] = copy(scoreDocs = b)
   def pageSize(size: Int): Query[D] = copy(pageSize = size)
 
-  def search()(implicit context: SearchContext[D]): IO[PagedResults[D]] = doSearch(
+  def search()(implicit context: SearchContext[D]): IO[PagedResults[D]] = indexSupport.doSearch(
+    query = this,
     context = context,
     offset = 0,
     after = None
@@ -30,37 +30,4 @@ case class Query[D <: Document[D]](filter: Option[Filter[D]] = None,
     fs2.Stream.force(io)
       .flatMap(_.stream)
   }
-
-  private[query] def doSearch(context: SearchContext[D],
-                              offset: Int,
-                              after: Option[ScoreDoc]): IO[PagedResults[D]] = IO {
-    val q = filter.map(_.asQuery).getOrElse(new MatchAllDocsQuery)
-    val sortFields = sort match {
-      case Nil => List(SortField.FIELD_SCORE)
-      case _ => sort.map(sort2SortField)
-    }
-    val s = new LuceneSort(sortFields: _*)
-    val topFieldDocs: TopFieldDocs = after match {
-      case Some(scoreDoc) => context.indexSearcher.searchAfter(scoreDoc, q, pageSize, s, this.scoreDocs)
-      case None => context.indexSearcher.search(q, pageSize, s, this.scoreDocs)
-    }
-    val scoreDocs: List[ScoreDoc] = topFieldDocs.scoreDocs.toList
-    val total: Int = topFieldDocs.totalHits.value.toInt
-    val storedFields: StoredFields = context.indexSearcher.storedFields()
-    val ids: List[Id[D]] = scoreDocs.map(doc => Id[D](storedFields.document(doc.doc).get("_id")))
-    PagedResults(
-      query = this,
-      context = context,
-      offset = offset,
-      total = total,
-      ids = ids,
-      lastScoreDoc = scoreDocs.lastOption
-    )
-  }
-
-  private[query] def sort2SortField(sort: Sort): SortField = sort match {
-    case Sort.BestMatch => SortField.FIELD_SCORE
-    case Sort.IndexOrder => SortField.FIELD_DOC
-    case Sort.ByField(field, reverse) => new SortField(field.fieldName, field.sortType, reverse)
-  }
 }
\ No newline at end of file
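`Query` now carries its `IndexSupport`, so `search()` is a thin delegation to the backend's `doSearch`; the Lucene-specific body removed above reappears in `LuceneSupport` further down. For callers the API is unchanged apart from the implicit `SearchContext`. A sketch against a hypothetical `People` collection (SimpleSpec at the end of this diff is the working equivalent):

```scala
// `People` and its `name` field are illustrative, not part of this change.
val janes: IO[List[String]] = People.withSearchContext { implicit context =>
  People.query
    .filter(People.name.is("Jane Doe"))
    .pageSize(50)
    .search()
    .flatMap(_.docs)
    .map(_.map(_.name))
}
```
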
diff --git a/core/src/main/scala/lightdb/query/SearchContext.scala b/core/src/main/scala/lightdb/query/SearchContext.scala
new file mode 100644
index 00000000..839c2a63
--- /dev/null
+++ b/core/src/main/scala/lightdb/query/SearchContext.scala
@@ -0,0 +1,6 @@
+package lightdb.query
+
+import lightdb.index.IndexSupport
+import lightdb.Document
+
+case class SearchContext[D <: Document[D]](indexSupport: IndexSupport[D])
\ No newline at end of file
diff --git a/lucene/src/main/scala/lightdb/lucene/LuceneFilter.scala b/lucene/src/main/scala/lightdb/lucene/LuceneFilter.scala
new file mode 100644
index 00000000..12d72409
--- /dev/null
+++ b/lucene/src/main/scala/lightdb/lucene/LuceneFilter.scala
@@ -0,0 +1,7 @@
+package lightdb.lucene
+
+import lightdb.Document
+import lightdb.query.Filter
+import org.apache.lucene.search.{Query => LuceneQuery}
+
+case class LuceneFilter[D <: Document[D]](asQuery: () => LuceneQuery) extends Filter[D]
diff --git a/lucene/src/main/scala/lightdb/lucene/LuceneIndexContext.scala b/lucene/src/main/scala/lightdb/lucene/LuceneIndexContext.scala
new file mode 100644
index 00000000..85039ba1
--- /dev/null
+++ b/lucene/src/main/scala/lightdb/lucene/LuceneIndexContext.scala
@@ -0,0 +1,20 @@
+package lightdb.lucene
+
+import cats.effect.IO
+import lightdb.Document
+import lightdb.query.{IndexContext, PagedResults, SearchContext}
+import org.apache.lucene.search.ScoreDoc
+
+case class LuceneIndexContext[D <: Document[D]](context: SearchContext[D],
+                                                lastScoreDoc: Option[ScoreDoc]) extends IndexContext[D] {
+  override def nextPage(currentPage: PagedResults[D]): IO[Option[PagedResults[D]]] = if (currentPage.hasNext) {
+    currentPage.query.indexSupport.doSearch(
+      query = currentPage.query,
+      context = context,
+      offset = currentPage.offset + currentPage.query.pageSize,
+      after = Some(currentPage)
+    ).map(Some.apply)
+  } else {
+    IO.pure(None)
+  }
+}
diff --git a/lucene/src/main/scala/lightdb/lucene/LuceneIndexManager.scala b/lucene/src/main/scala/lightdb/lucene/LuceneIndexManager.scala
new file mode 100644
index 00000000..dab45f74
--- /dev/null
+++ b/lucene/src/main/scala/lightdb/lucene/LuceneIndexManager.scala
@@ -0,0 +1,22 @@
+package lightdb.lucene
+
+import cats.effect.IO
+import lightdb.Document
+import lightdb.index.IndexManager
+import lightdb.lucene.index._
+
+case class LuceneIndexManager[D <: Document[D]](indexSupport: LuceneSupport[D]) extends IndexManager[D] {
+  def apply(name: String): IndexedFieldBuilder = IndexedFieldBuilder(name)
+
+  override def count(): IO[Int] = indexSupport.indexer.count()
+
+  case class IndexedFieldBuilder(fieldName: String) {
+    def tokenized(f: D => String): TokenizedField[D] = TokenizedField(fieldName, indexSupport, f)
+    def string(f: D => String, store: Boolean = false): StringField[D] = StringField(fieldName, indexSupport, f, store)
+    def int(f: D => Int): IntField[D] = IntField(fieldName, indexSupport, f)
+    def long(f: D => Long): LongField[D] = LongField(fieldName, indexSupport, f)
+    def float(f: D => Float): FloatField[D] = FloatField(fieldName, indexSupport, f)
+    def double(f: D => Double): DoubleField[D] = DoubleField(fieldName, indexSupport, f)
+    def bigDecimal(f: D => BigDecimal): BigDecimalField[D] = BigDecimalField(fieldName, indexSupport, f)
+  }
+}
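The manager is now typed to `LuceneSupport[D]` rather than a bare `Collection[D]`, so the builder threads the correct `indexSupport` into every field it creates. Declaring fields looks the same as before; a sketch with a hypothetical `Article` document and `DB` instance:

```scala
import fabric.rw._
import lightdb._
import lightdb.lucene.LuceneSupport
import lightdb.lucene.index.{IntField, TokenizedField}

// Article and DB are illustrative only; see SimpleSpec at the end of this diff
// for a complete, working collection definition.
case class Article(title: String, wordCount: Int, _id: Id[Article] = Id()) extends Document[Article]

object Article extends Collection[Article]("articles", DB) with LuceneSupport[Article] {
  override implicit val rw: RW[Article] = RW.gen

  val title: TokenizedField[Article] = index("title").tokenized(_.title)
  val wordCount: IntField[Article] = index("wordCount").int(_.wordCount)
}
```
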
diff --git a/lucene/src/main/scala/lightdb/lucene/LuceneIndexedField.scala b/lucene/src/main/scala/lightdb/lucene/LuceneIndexedField.scala
new file mode 100644
index 00000000..7fbb9cce
--- /dev/null
+++ b/lucene/src/main/scala/lightdb/lucene/LuceneIndexedField.scala
@@ -0,0 +1,14 @@
+package lightdb.lucene
+
+import lightdb.Document
+import lightdb.index.IndexedField
+import org.apache.lucene.search.SortField
+import org.apache.lucene.{document => ld}
+
+trait LuceneIndexedField[F, D <: Document[D]] extends IndexedField[F, D] {
+  protected[lightdb] def createFields(doc: D): List[ld.Field]
+
+  protected[lightdb] def sortType: SortField.Type
+
+  collection.asInstanceOf[LuceneSupport[D]].index.register(this)
+}
diff --git a/lucene/src/main/scala/lightdb/lucene/LuceneIndexer.scala b/lucene/src/main/scala/lightdb/lucene/LuceneIndexer.scala
new file mode 100644
index 00000000..4c640d21
--- /dev/null
+++ b/lucene/src/main/scala/lightdb/lucene/LuceneIndexer.scala
@@ -0,0 +1,72 @@
+package lightdb.lucene
+
+import cats.effect.IO
+import lightdb.index.{IndexSupport, Indexer}
+import lightdb.query.SearchContext
+import lightdb.{Document, Id}
+import org.apache.lucene.analysis.Analyzer
+import org.apache.lucene.analysis.standard.StandardAnalyzer
+import org.apache.lucene.index.{IndexWriter, IndexWriterConfig, Term}
+import org.apache.lucene.queryparser.classic.QueryParser
+import org.apache.lucene.search.{IndexSearcher, MatchAllDocsQuery, SearcherFactory, SearcherManager}
+import org.apache.lucene.store.{ByteBuffersDirectory, FSDirectory}
+import org.apache.lucene.document.{Document => LuceneDocument, Field => LuceneField}
+
+import java.nio.file.{Files, Path}
+import java.util.concurrent.ConcurrentHashMap
+
+case class LuceneIndexer[D <: Document[D]](indexSupport: IndexSupport[D],
+                                           persistent: Boolean = true,
+                                           analyzer: Analyzer = new StandardAnalyzer) extends Indexer[D] {
+  private lazy val path: Option[Path] = if (persistent) {
+    val p = indexSupport.db.directory.resolve(indexSupport.collectionName).resolve("index")
+    Files.createDirectories(p)
+    Some(p)
+  } else {
+    None
+  }
+  private lazy val directory = path
+    .map(p => FSDirectory.open(p))
+    .getOrElse(new ByteBuffersDirectory())
+  private lazy val config = new IndexWriterConfig(analyzer)
+  private lazy val indexWriter = new IndexWriter(directory, config)
+
+  private lazy val searcherManager = new SearcherManager(indexWriter, new SearcherFactory)
+
+  private lazy val parser = new QueryParser("_id", analyzer)
+
+  private[lucene] val contextMapping = new ConcurrentHashMap[SearchContext[D], IndexSearcher]
+
+  override def withSearchContext[Return](f: SearchContext[D] => IO[Return]): IO[Return] = {
+    val indexSearcher = searcherManager.acquire()
+    val context = SearchContext(indexSupport)
+    contextMapping.put(context, indexSearcher)
+    f(context)
+      .guarantee(IO {
+        contextMapping.remove(context)
+        searcherManager.release(indexSearcher)
+      })
+  }
+
+  private[lightdb] def addDoc(id: Id[D], fields: List[LuceneField]): Unit = if (fields.length > 1) {
+    val document = new LuceneDocument
+    fields.foreach(document.add)
+    indexWriter.updateDocument(new Term("_id", id.value), document)
+  }
+
+  private[lightdb] def delete(id: Id[D]): IO[Unit] = IO {
+    indexWriter.deleteDocuments(parser.parse(s"_id:${id.value}"))
+  }
+
+  private def commitBlocking(): Unit = {
+    indexWriter.flush()
+    indexWriter.commit()
+    searcherManager.maybeRefreshBlocking()
+  }
+
+  override def commit(): IO[Unit] = IO(commitBlocking())
+
+  override def count(): IO[Int] = withSearchContext { context =>
+    IO(context.indexSupport.asInstanceOf[LuceneSupport[D]].indexSearcher(context).count(new MatchAllDocsQuery))
+  }
+}
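With `SearchContext` reduced to a backend-agnostic handle, the Lucene `IndexSearcher` it used to carry now lives in `contextMapping`, keyed by context and released in `guarantee`. The acquire/release pairing has the same shape as `cats.effect.Resource`; a sketch of that equivalence (not part of this change):

```scala
import cats.effect.{IO, Resource}
import org.apache.lucene.search.{IndexSearcher, SearcherManager}

// Equivalent lifecycle to withSearchContext: the searcher is always released,
// even when the use function fails or is canceled.
def searcherResource(manager: SearcherManager): Resource[IO, IndexSearcher] =
  Resource.make(IO(manager.acquire()))(searcher => IO(manager.release(searcher)))
```
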
diff --git a/lucene/src/main/scala/lightdb/lucene/LuceneSupport.scala b/lucene/src/main/scala/lightdb/lucene/LuceneSupport.scala
index b426471e..60fcdaec 100644
--- a/lucene/src/main/scala/lightdb/lucene/LuceneSupport.scala
+++ b/lucene/src/main/scala/lightdb/lucene/LuceneSupport.scala
@@ -4,29 +4,72 @@ import cats.effect.IO
 import lightdb._
 import lightdb.index.{IndexManager, IndexSupport, IndexedField, Indexer}
 import lightdb.lucene.index._
-import lightdb.query.{IndexContext, PagedResults, Query}
-import org.apache.lucene.search.{MatchAllDocsQuery, ScoreDoc, SearcherFactory, SearcherManager, SortField}
+import lightdb.query.{Filter, IndexContext, PagedResults, Query, SearchContext, Sort}
+import org.apache.lucene.search.{IndexSearcher, MatchAllDocsQuery, ScoreDoc, SearcherFactory, SearcherManager, SortField, TopFieldDocs, Query => LuceneQuery, Sort => LuceneSort}
 import org.apache.lucene.{document => ld}
 import org.apache.lucene.analysis.Analyzer
 import org.apache.lucene.analysis.standard.StandardAnalyzer
-import org.apache.lucene.index.{IndexWriter, IndexWriterConfig, Term}
+import org.apache.lucene.index.{IndexWriter, IndexWriterConfig, StoredFields, Term}
 import org.apache.lucene.queryparser.classic.QueryParser
 import org.apache.lucene.store.{ByteBuffersDirectory, FSDirectory}
 import org.apache.lucene.document.{Document => LuceneDocument, Field => LuceneField}
 
 import java.nio.file.{Files, Path}
+import java.util.concurrent.ConcurrentHashMap
 
-trait LuceneSupport[D <: Document[D]] extends IndexSupport[D, LuceneIndexer[_]] {
-  override protected lazy val indexer: LuceneIndexer[D] = LuceneIndexer[D](this)
+trait LuceneSupport[D <: Document[D]] extends IndexSupport[D] {
+  override protected[lucene] lazy val indexer: LuceneIndexer[D] = LuceneIndexer[D](this)
 
   override lazy val index: LuceneIndexManager[D] = LuceneIndexManager(this)
 
   val _id: StringField[D] = index("_id").string(_._id.value, store = true)
 
+  protected[lucene] def indexSearcher(context: SearchContext[D]): IndexSearcher = indexer.contextMapping.get(context)
+
+  def withSearchContext[Return](f: SearchContext[D] => IO[Return]): IO[Return] = indexer.withSearchContext(f)
+
+  private def sort2SortField(sort: Sort): SortField = sort match {
+    case Sort.BestMatch => SortField.FIELD_SCORE
+    case Sort.IndexOrder => SortField.FIELD_DOC
+    case Sort.ByField(field, reverse) => new SortField(field.fieldName, field.asInstanceOf[LuceneIndexedField[_, D]].sortType, reverse)
+  }
+
+  override def doSearch(query: Query[D],
+                        context: SearchContext[D],
+                        offset: Int,
+                        after: Option[PagedResults[D]]): IO[PagedResults[D]] = IO {
+    val q = query.filter.map(_.asInstanceOf[LuceneFilter[D]].asQuery()).getOrElse(new MatchAllDocsQuery)
+    val sortFields = query.sort match {
+      case Nil => List(SortField.FIELD_SCORE)
+      case _ => query.sort.map(sort2SortField)
+    }
+    val s = new LuceneSort(sortFields: _*)
+    val indexSearcher = query.indexSupport.asInstanceOf[LuceneSupport[D]].indexSearcher(context)
+    val topFieldDocs: TopFieldDocs = after match {
+      case Some(afterPage) =>
+        val afterDoc = afterPage.context.asInstanceOf[LuceneIndexContext[D]].lastScoreDoc.get
+        indexSearcher.searchAfter(afterDoc, q, query.pageSize, s, query.scoreDocs)
+      case None => indexSearcher.search(q, query.pageSize, s, query.scoreDocs)
+    }
+    val scoreDocs: List[ScoreDoc] = topFieldDocs.scoreDocs.toList
+    val total: Int = topFieldDocs.totalHits.value.toInt
+    val storedFields: StoredFields = indexSearcher.storedFields()
+    val ids: List[Id[D]] = scoreDocs.map(doc => Id[D](storedFields.document(doc.doc).get("_id")))
+    val indexContext = LuceneIndexContext(
+      context = context,
+      lastScoreDoc = scoreDocs.lastOption
+    )
+    PagedResults(
+      query = query,
+      context = indexContext,
+      offset = offset,
+      total = total,
+      ids = ids
+    )
+  }
+
   override protected def postSet(doc: D): IO[Unit] = for {
     fields <- IO(index.fields.flatMap { field =>
-      field.createFields(doc)
+      field.asInstanceOf[LuceneIndexedField[_, D]].createFields(doc)
     })
     _ = indexer.addDoc(doc._id, fields)
     _ <- super.postSet(doc)
@@ -35,95 +78,4 @@ trait LuceneSupport[D <: Document[D]] extends IndexSupport[D, LuceneIndexer[_]]
   override protected def postDelete(doc: D): IO[Unit] = indexer.delete(doc._id).flatMap { _ =>
     super.postDelete(doc)
   }
-}
-
-case class LuceneIndexContext[D <: Document[D]](page: PagedResults[D],
-                                                context: SearchContext[D],
-                                                lastScoreDoc: Option[ScoreDoc]) extends IndexContext[D] {
-  override def nextPage(): IO[Option[PagedResults[D]]] = if (page.hasNext) {
-    query.doSearch(
-      context = context,
-      offset = offset + query.pageSize,
-      after = lastScoreDoc
-    ).map(Some.apply)
-  } else {
-    IO.pure(None)
-  }
-}
-
-case class LuceneIndexManager[D <: Document[D]](collection: Collection[D]) extends IndexManager[D, LuceneIndexedField[_, D]] {
-  def apply(name: String): IndexedFieldBuilder = IndexedFieldBuilder(name)
-
-  case class IndexedFieldBuilder(fieldName: String) {
-    def tokenized(f: D => String): TokenizedField[D] = TokenizedField(fieldName, collection, f)
-    def string(f: D => String, store: Boolean = false): StringField[D] = StringField(fieldName, collection, f, store)
-    def int(f: D => Int): IntField[D] = IntField(fieldName, collection, f)
-    def long(f: D => Long): LongField[D] = LongField(fieldName, collection, f)
-    def float(f: D => Float): FloatField[D] = FloatField(fieldName, collection, f)
-    def double(f: D => Double): DoubleField[D] = DoubleField(fieldName, collection, f)
-    def bigDecimal(f: D => BigDecimal): BigDecimalField[D] = BigDecimalField(fieldName, collection, f)
-  }
-}
-
-/*
-FILTER: def apply[D <: Document[D]](f: => LuceneQuery): Filter[D] = new Filter[D] {
-    override protected[lightdb] def asQuery: LuceneQuery = f
-  }
- */
-
-trait LuceneIndexedField[F, D <: Document[D]] extends IndexedField[F, D] {
-  protected[lightdb] def createFields(doc: D): List[ld.Field]
-  protected[lightdb] def sortType: SortField.Type
-
-  collection.asInstanceOf[LuceneSupport].index.register(this)
-}
-
-case class LuceneIndexer[D <: Document[D]](collection: Collection[D],
-                                           persistent: Boolean = true,
-                                           analyzer: Analyzer = new StandardAnalyzer) extends Indexer[D] {
-  private lazy val path: Option[Path] = if (persistent) {
-    val p = collection.db.directory.resolve(collection.collectionName).resolve("index")
-    Files.createDirectories(p)
-    Some(p)
-  } else {
-    None
-  }
-  private lazy val directory = path
-    .map(p => FSDirectory.open(p))
-    .getOrElse(new ByteBuffersDirectory())
-  private lazy val config = new IndexWriterConfig(analyzer)
-  private lazy val indexWriter = new IndexWriter(directory, config)
-
-  private lazy val searcherManager = new SearcherManager(indexWriter, new SearcherFactory)
-
-  private lazy val parser = new QueryParser("_id", analyzer)
-
-  def withSearchContext[Return](f: SearchContext[D] => IO[Return]): IO[Return] = {
-    val indexSearcher = searcherManager.acquire()
-    val context = SearchContext(collection, indexSearcher)
-    f(context)
-      .guarantee(IO(searcherManager.release(indexSearcher)))
-  }
-
-  private[lightdb] def addDoc(id: Id[D], fields: List[LuceneField]): Unit = if (fields.length > 1) {
-    val document = new LuceneDocument
-    fields.foreach(document.add)
-    indexWriter.updateDocument(new Term("_id", id.value), document)
-  }
-
-  private[lightdb] def delete(id: Id[D]): IO[Unit] = IO {
-    indexWriter.deleteDocuments(parser.parse(s"_id:${id.value}"))
-  }
-
-  private def commitBlocking(): Unit = {
-    indexWriter.flush()
-    indexWriter.commit()
-    searcherManager.maybeRefreshBlocking()
-  }
-
-  override def commit(): IO[Unit] = IO(commitBlocking())
-
-  override def count(): IO[Int] = withSearchContext { context =>
-    IO(context.indexSearcher.count(new MatchAllDocsQuery))
-  }
-}
+}
\ No newline at end of file
diff --git a/lucene/src/main/scala/lightdb/lucene/SearchContext.scala b/lucene/src/main/scala/lightdb/lucene/SearchContext.scala
deleted file mode 100644
index dc3577b5..00000000
--- a/lucene/src/main/scala/lightdb/lucene/SearchContext.scala
+++ /dev/null
@@ -1,7 +0,0 @@
-package lightdb.lucene
-
-import lightdb.{Collection, Document}
-import org.apache.lucene.search.IndexSearcher
-
-case class SearchContext[D <: Document[D]](collection: Collection[D],
-                                           private[lightdb] val indexSearcher: IndexSearcher)
\ No newline at end of file
diff --git a/lucene/src/main/scala/lightdb/lucene/index/IntField.scala b/lucene/src/main/scala/lightdb/lucene/index/IntField.scala
index 7b21b132..22f2b741 100644
--- a/lucene/src/main/scala/lightdb/lucene/index/IntField.scala
+++ b/lucene/src/main/scala/lightdb/lucene/index/IntField.scala
@@ -1,7 +1,6 @@
 package lightdb.lucene.index
 
-import lightdb.index.IndexedField
-import lightdb.lucene.LuceneIndexedField
+import lightdb.lucene.{LuceneFilter, LuceneIndexedField}
 import lightdb.query.Filter
 import lightdb.{Collection, Document}
 import org.apache.lucene.document.Field
@@ -13,9 +12,9 @@ case class IntField[D <: Document[D]](fieldName: String,
                                       get: D => Int) extends LuceneIndexedField[Int, D] {
   def ===(value: Int): Filter[D] = is(value)
 
-  def is(value: Int): Filter[D] = Filter(ld.IntField.newExactQuery(fieldName, value))
+  def is(value: Int): Filter[D] = LuceneFilter(() => ld.IntField.newExactQuery(fieldName, value))
 
-  def between(lower: Int, upper: Int): Filter[D] = Filter(ld.IntField.newRangeQuery(fieldName, lower, upper))
+  def between(lower: Int, upper: Int): Filter[D] = LuceneFilter(() => ld.IntField.newRangeQuery(fieldName, lower, upper))
 
   override protected[lightdb] def createFields(doc: D): List[Field] = List(
     new ld.IntField(fieldName, get(doc), Field.Store.NO)
diff --git a/lucene/src/main/scala/lightdb/lucene/index/StringField.scala b/lucene/src/main/scala/lightdb/lucene/index/StringField.scala
index 6048f047..7bcd7444 100644
--- a/lucene/src/main/scala/lightdb/lucene/index/StringField.scala
+++ b/lucene/src/main/scala/lightdb/lucene/index/StringField.scala
@@ -1,7 +1,6 @@
 package lightdb.lucene.index
 
-import lightdb.index.IndexedField
-import lightdb.lucene.LuceneIndexedField
+import lightdb.lucene.{LuceneFilter, LuceneIndexedField}
 import lightdb.query.Filter
 import lightdb.{Collection, Document}
 import org.apache.lucene.index.Term
@@ -14,15 +13,15 @@ case class StringField[D <: Document[D]](fieldName: String,
                                          store: Boolean) extends LuceneIndexedField[String, D] {
   def ===(value: String): Filter[D] = is(value)
 
-  def is(value: String): Filter[D] = Filter(new TermQuery(new Term(fieldName, value)))
+  def is(value: String): Filter[D] = LuceneFilter(() => new TermQuery(new Term(fieldName, value)))
 
   def includedIn(values: Seq[String]): Filter[D] = {
     val b = new BooleanQuery.Builder
     b.setMinimumNumberShouldMatch(1)
     values.foreach { value =>
-      b.add(is(value).asQuery, BooleanClause.Occur.SHOULD)
+      b.add(is(value).asInstanceOf[LuceneFilter[D]].asQuery(), BooleanClause.Occur.SHOULD)
     }
-    Filter(b.build())
+    LuceneFilter(() => b.build())
   }
 
   override protected[lightdb] def createFields(doc: D): List[ld.Field] = List(
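Filters are now built lazily per backend: `LuceneFilter` wraps a thunk, so constructing a `Query` touches no Lucene code until `doSearch` forces it, as `StringField.includedIn` does above. The same style supports cheap composition; a hypothetical combinator (not part of this change):

```scala
import lightdb.Document
import lightdb.lucene.LuceneFilter
import org.apache.lucene.search.{BooleanClause, BooleanQuery}

// Combines filters into a single conjunction; the Lucene query is only built
// when the resulting thunk is invoked during doSearch.
def and[D <: Document[D]](filters: LuceneFilter[D]*): LuceneFilter[D] =
  LuceneFilter { () =>
    val b = new BooleanQuery.Builder
    filters.foreach(f => b.add(f.asQuery(), BooleanClause.Occur.MUST))
    b.build()
  }
```
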
diff --git a/lucene/src/test/scala/spec/SimpleSpec.scala b/lucene/src/test/scala/spec/SimpleSpec.scala
new file mode 100644
index 00000000..aa729b17
--- /dev/null
+++ b/lucene/src/test/scala/spec/SimpleSpec.scala
@@ -0,0 +1,245 @@
+package spec
+
+import cats.effect.IO
+import cats.effect.testing.scalatest.AsyncIOSpec
+import fabric.rw._
+import lightdb._
+import lightdb.lucene.LuceneSupport
+import lightdb.lucene.index.{IntField, StringField}
+import lightdb.query._
+import lightdb.upgrade.DatabaseUpgrade
+import org.scalatest.matchers.should.Matchers
+import org.scalatest.wordspec.AsyncWordSpec
+import scribe.{Level, Logger}
+
+import java.nio.file.Paths
+
+class SimpleSpec extends AsyncWordSpec with AsyncIOSpec with Matchers {
+  private val id1 = Id[Person]("john")
+  private val id2 = Id[Person]("jane")
+
+  private val p1 = Person("John Doe", 21, id1)
+  private val p2 = Person("Jane Doe", 19, id2)
+
+  "Simple database" should {
+    "initialize the database" in {
+      Logger("com.oath.halodb").withMinimumLevel(Level.Warn).replace()
+      DB.init(truncate = true)
+    }
+    "store John Doe" in {
+      Person.set(p1).map { p =>
+        p._id should be(id1)
+      }
+    }
+    "verify John Doe exists" in {
+      Person.get(id1).map { o =>
+        o should be(Some(p1))
+      }
+    }
+    "store Jane Doe" in {
+      Person.set(p2).map { p =>
+        p._id should be(id2)
+      }
+    }
+    "verify Jane Doe exists" in {
+      Person.get(id2).map { o =>
+        o should be(Some(p2))
+      }
+    }
+    "verify exactly two objects in data" in {
+      Person.size.map { size =>
+        size should be(2)
+      }
+    }
+    "flush data" in {
+      Person.commit()
+    }
+    "verify exactly two objects in index" in {
+      Person.index.count().map { size =>
+        size should be(2)
+      }
+    }
+    "verify exactly two objects in the store" in {
+      Person.idStream.compile.toList.map { ids =>
+        ids.toSet should be(Set(id1, id2))
+      }
+    }
+    "search by name for positive result" in {
+      Person.withSearchContext { implicit context =>
+        Person
+          .query
+          .filter(Person.name.is("Jane Doe"))
+          .search()
+          .flatMap { page =>
+            page.page should be(0)
+            page.pages should be(1)
+            page.offset should be(0)
+            page.total should be(1)
+            page.ids should be(List(id2))
+            page.hasNext should be(false)
+            page.docs.map { people =>
+              people.length should be(1)
+              val p = people.head
+              p._id should be(id2)
+              p.name should be("Jane Doe")
+              p.age should be(19)
+            }
+          }
+      }
+    }
+    "search by age for positive result" in {
+      Person.ageLinks.query(19).compile.toList.map { people =>
+        people.length should be(1)
+        val p = people.head
+        p._id should be(id2)
+        p.name should be("Jane Doe")
+        p.age should be(19)
+      }
+    }
+    "search by id for John" in {
+      Person(id1).map { person =>
+        person._id should be(id1)
+        person.name should be("John Doe")
+        person.age should be(21)
+      }
+    }
+    "search for age range" in {
+      Person.withSearchContext { implicit context =>
+        Person
+          .query
+          .filter(Person.age.between(19, 21))
+          .search()
+          .flatMap { results =>
+            results.docs.map { people =>
+              people.length should be(2)
+              val names = people.map(_.name).toSet
+              names should be(Set("John Doe", "Jane Doe"))
+              val ages = people.map(_.age).toSet
+              ages should be(Set(21, 19))
+            }
+          }
+      }
+    }
+    "do paginated search" in {
+      Person.withSearchContext { implicit context =>
+        Person.query.pageSize(1).search().flatMap { page1 =>
+          page1.page should be(0)
+          page1.pages should be(2)
+          page1.hasNext should be(true)
+          page1.docs.flatMap { people1 =>
+            people1.length should be(1)
+            page1.next().flatMap {
+              case Some(page2) =>
+                page2.page should be(1)
+                page2.pages should be(2)
+                page2.hasNext should be(false)
+                page2.docs.map { people2 =>
+                  people2.length should be(1)
+                }
+              case None => fail("Should have a second page")
+            }
+          }
+        }
+      }
+    }
+    "do paginated search as a stream" in {
+      Person.withSearchContext { implicit context =>
+        Person.query.pageSize(1).stream.compile.toList.map { people =>
+          people.length should be(2)
+          people.map(_.name).toSet should be(Set("John Doe", "Jane Doe"))
+        }
+      }
+    }
+    "delete John" in {
+      Person.delete(id1).map { deleted =>
+        deleted should not be empty
+      }
+    }
+    "verify exactly one object in data" in {
+      Person.size.map { size =>
+        size should be(1)
+      }
+    }
+    "commit data" in {
+      Person.commit()
+    }
+    "verify exactly one object in index" in {
+      Person.index.count().map { size =>
+        size should be(1)
+      }
+    }
+    "list all documents" in {
+      Person.stream.compile.toList.map { people =>
+        people.length should be(1)
+        val p = people.head
+        p._id should be(id2)
+        p.name should be("Jane Doe")
+        p.age should be(19)
+      }
+    }
+    "replace Jane Doe" in {
+      Person.set(Person("Jan Doe", 20, id2)).map { p =>
+        p._id should be(id2)
+      }
+    }
+    "verify Jan Doe" in {
+      Person(id2).map { p =>
+        p._id should be(id2)
+        p.name should be("Jan Doe")
+        p.age should be(20)
+      }
+    }
+    "commit new data" in {
+      Person.commit()
+    }
+    "list new documents" in {
+      Person.stream.compile.toList.map { results =>
+        results.length should be(1)
+        val doc = results.head
+        doc._id should be(id2)
+        doc.name should be("Jan Doe")
+        doc.age should be(20)
+      }
+    }
+    "verify start time has been set" in {
+      DB.startTime.get().map { startTime =>
+        startTime should be > 0L
+      }
+    }
+    "dispose" in {
+      DB.dispose()
+    }
+  }
+
+  object DB extends LightDB(directory = Paths.get("testdb")) {
+//    override protected def autoCommit: Boolean = true
+
+    val startTime: StoredValue[Long] = stored[Long]("startTime", -1L)
+
+//    val people: Collection[Person] = collection("people", Person)
+
+    override lazy val collections: List[Collection[_]] = List(
+      Person
+    )
+
+    override def upgrades: List[DatabaseUpgrade] = List(InitialSetupUpgrade)
+  }
+
+  case class Person(name: String, age: Int, _id: Id[Person] = Id()) extends Document[Person]
+
+  object Person extends Collection[Person]("people", DB) with LuceneSupport[Person] {
+    override implicit val rw: RW[Person] = RW.gen
+
+    val name: StringField[Person] = index("name").string(_.name)
+    val age: IntField[Person] = index("age").int(_.age)
+    val ageLinks: IndexedLinks[Int, Person] = indexedLinks[Int]("age", _.toString, _.age)
+  }
+
+  object InitialSetupUpgrade extends DatabaseUpgrade {
+    override def applyToNew: Boolean = true
+    override def blockStartup: Boolean = true
+    override def alwaysRun: Boolean = false
+
+    override def upgrade(ldb: LightDB): IO[Unit] = DB.startTime.set(System.currentTimeMillis()).map(_ => ())
+  }
+}
\ No newline at end of file
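Taken together, the refactor means core no longer compiles against Lucene: a new backend only has to provide `doSearch` and an `IndexContext` for paging. A hypothetical, match-all-only skeleton showing that surface area (all names below are illustrative, not part of this change):

```scala
import cats.effect.IO
import lightdb.{Document, Id}
import lightdb.index.IndexSupport
import lightdb.query.{IndexContext, PagedResults, Query, SearchContext}

trait NaiveSupport[D <: Document[D]] extends IndexSupport[D] {
  protected def storedIds: IO[List[Id[D]]] // supplied by the underlying store

  // Ignores filters and sorts; just enough to satisfy the core contract.
  override def doSearch(query: Query[D],
                        context: SearchContext[D],
                        offset: Int,
                        after: Option[PagedResults[D]]): IO[PagedResults[D]] =
    storedIds.map { ids =>
      PagedResults(
        query = query,
        context = new IndexContext[D] {
          // Paging is simply a re-run of doSearch at the next offset.
          def nextPage(currentPage: PagedResults[D]): IO[Option[PagedResults[D]]] =
            if (currentPage.hasNext) {
              doSearch(query, context, currentPage.offset + query.pageSize, Some(currentPage)).map(Some.apply)
            } else {
              IO.pure(None)
            }
        },
        offset = offset,
        total = ids.size,
        ids = ids.slice(offset, offset + query.pageSize)
      )
    }
}
```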