diff --git a/benchmark/charts/Get Each Record Multi-total-time.png b/benchmark/charts/Get Each Record Multi-total-time.png
deleted file mode 100644
index c40b8def..00000000
Binary files a/benchmark/charts/Get Each Record Multi-total-time.png and /dev/null differ
diff --git a/benchmark/charts/Get Each Record-total-time.png b/benchmark/charts/Get Each Record-total-time.png
deleted file mode 100644
index 067973c0..00000000
Binary files a/benchmark/charts/Get Each Record-total-time.png and /dev/null differ
diff --git a/benchmark/charts/Insert Records-total-time.png b/benchmark/charts/Insert Records-total-time.png
deleted file mode 100644
index d6fa7aef..00000000
Binary files a/benchmark/charts/Insert Records-total-time.png and /dev/null differ
diff --git a/benchmark/charts/Search All Records Multi-total-time.png b/benchmark/charts/Search All Records Multi-total-time.png
deleted file mode 100644
index 3f4c08f8..00000000
Binary files a/benchmark/charts/Search All Records Multi-total-time.png and /dev/null differ
diff --git a/benchmark/charts/Search All Records-total-time.png b/benchmark/charts/Search All Records-total-time.png
deleted file mode 100644
index 0571e76b..00000000
Binary files a/benchmark/charts/Search All Records-total-time.png and /dev/null differ
diff --git a/benchmark/charts/Search Each Record Multi-total-time.png b/benchmark/charts/Search Each Record Multi-total-time.png
deleted file mode 100644
index 8342dfae..00000000
Binary files a/benchmark/charts/Search Each Record Multi-total-time.png and /dev/null differ
diff --git a/benchmark/charts/Search Each Record-total-time.png b/benchmark/charts/Search Each Record-total-time.png
deleted file mode 100644
index b5fb880a..00000000
Binary files a/benchmark/charts/Search Each Record-total-time.png and /dev/null differ
diff --git a/benchmark/charts/Stream Records Multi-total-time.png b/benchmark/charts/Stream Records Multi-total-time.png
deleted file mode 100644
index 117c63ed..00000000
Binary files a/benchmark/charts/Stream Records Multi-total-time.png and /dev/null differ
diff --git a/benchmark/charts/Stream Records-total-time.png b/benchmark/charts/Stream Records-total-time.png
deleted file mode 100644
index 2b85de5a..00000000
Binary files a/benchmark/charts/Stream Records-total-time.png and /dev/null differ
diff --git a/benchmark/report-LightDB-SQLite-Previous.json b/benchmark/report-LightDB-SQLite-Previous.json
deleted file mode 100644
index 0b01c962..00000000
--- a/benchmark/report-LightDB-SQLite-Previous.json
+++ /dev/null
@@ -1,274 +0,0 @@
-[
-  {
-    "benchName": "LightDB SQLiteStore - Previous",
-    "name": "Insert Records",
-    "maxProgress": 1.0E+7,
-    "size": 1397116928,
-    "logs": [
-      {
-        "progress": 333,
-        "timeStamp": 1720813840615,
-        "elapsed": 0.0,
-        "heap": 51427760,
-        "nonHeap": 21621184,
-        "cpuLoad": 0.0,
-        "cpuTime": 1860000000
-      },
-      {
-        "progress": 5000000,
-        "timeStamp": 1720813870626,
-        "elapsed": 30.011,
-        "heap": 1222986896,
-        "nonHeap": 20577896,
-        "cpuLoad": 0.03466111584192765,
-        "cpuTime": 34830000000
-      },
-      {
-        "progress": 10000000,
-        "timeStamp": 1720813900626,
-        "elapsed": 60.011,
-        "heap": 1123415472,
-        "nonHeap": 20578424,
-        "cpuLoad": 0.03342586167013648,
-        "cpuTime": 66620000000
-      },
-      {
-        "progress": 10000000,
-        "timeStamp": 1720813906494,
-        "elapsed": 65.879,
-        "heap": 1240855984,
-        "nonHeap": 20578616,
-        "cpuLoad": 0.03110826530067766,
-        "cpuTime": 72450000000
-      }
-    ]
-  },
-  {
-    "benchName": "LightDB SQLiteStore - Previous",
-    "name": "Stream Records",
-    "maxProgress": 1.0E+7,
-    "size": 1397116928,
"logs": [ - { - "progress": 0, - "timeStamp": 1720813906498, - "elapsed": 0.0, - "heap": 1240855984, - "nonHeap": 20619504, - "cpuLoad": 0.07142857142857142, - "cpuTime": 72460000000 - }, - { - "progress": 10000000, - "timeStamp": 1720813910255, - "elapsed": 3.757, - "heap": 1942263920, - "nonHeap": 22804864, - "cpuLoad": 0.03230114347717219, - "cpuTime": 76330000000 - } - ] - }, - { - "benchName": "LightDB SQLiteStore - Previous", - "name": "Stream Records Multi", - "maxProgress": 8.0E+7, - "size": 1397116928, - "logs": [ - { - "progress": 0, - "timeStamp": 1720813910256, - "elapsed": 0.001, - "heap": 1942263920, - "nonHeap": 22827984, - "cpuLoad": 0.0, - "cpuTime": 76330000000 - }, - { - "progress": 66053319, - "timeStamp": 1720813940256, - "elapsed": 30.001, - "heap": 615857848, - "nonHeap": 22126584, - "cpuLoad": 0.031463562456895654, - "cpuTime": 106440000000 - }, - { - "progress": 80000000, - "timeStamp": 1720813946532, - "elapsed": 36.277, - "heap": 3635767048, - "nonHeap": 22127480, - "cpuLoad": 0.03139505864736711, - "cpuTime": 112730000000 - } - ] - }, - { - "benchName": "LightDB SQLiteStore - Previous", - "name": "Search Each Record", - "maxProgress": 1.0E+7, - "size": 1397116928, - "logs": [ - { - "progress": 0, - "timeStamp": 1720813946537, - "elapsed": 0.0, - "heap": 3635767048, - "nonHeap": 22166744, - "cpuLoad": 0.0, - "cpuTime": 112730000000 - }, - { - "progress": 10000000, - "timeStamp": 1720813972112, - "elapsed": 25.575, - "heap": 3353704456, - "nonHeap": 24205808, - "cpuLoad": 0.03431942523530348, - "cpuTime": 140770000000 - } - ] - }, - { - "benchName": "LightDB SQLiteStore - Previous", - "name": "Search Each Record Multi", - "maxProgress": 8.0E+7, - "size": 1397116928, - "logs": [ - { - "progress": 0, - "timeStamp": 1720813972115, - "elapsed": 0.0, - "heap": 3370481672, - "nonHeap": 24218160, - "cpuLoad": 0.09090909090909091, - "cpuTime": 140780000000 - }, - { - "progress": 11661185, - "timeStamp": 1720814002116, - "elapsed": 30.001, - "heap": 2994173320, - "nonHeap": 24421208, - "cpuLoad": 0.033140804027448484, - "cpuTime": 172510000000 - }, - { - "progress": 23911052, - "timeStamp": 1720814032117, - "elapsed": 60.002, - "heap": 447268792, - "nonHeap": 24505760, - "cpuLoad": 0.03307500547599429, - "cpuTime": 204220000000 - }, - { - "progress": 35745277, - "timeStamp": 1720814062117, - "elapsed": 90.002, - "heap": 1328507320, - "nonHeap": 24507040, - "cpuLoad": 0.03316792896317576, - "cpuTime": 235970000000 - }, - { - "progress": 47724759, - "timeStamp": 1720814092118, - "elapsed": 120.003, - "heap": 368606888, - "nonHeap": 24512608, - "cpuLoad": 0.03324082382154707, - "cpuTime": 267830000000 - }, - { - "progress": 59743999, - "timeStamp": 1720814122118, - "elapsed": 150.003, - "heap": 5553217496, - "nonHeap": 24512608, - "cpuLoad": 0.03302926763006384, - "cpuTime": 299450000000 - }, - { - "progress": 71888131, - "timeStamp": 1720814152119, - "elapsed": 180.004, - "heap": 3634477488, - "nonHeap": 24516064, - "cpuLoad": 0.032843705304477645, - "cpuTime": 330900000000 - }, - { - "progress": 80000000, - "timeStamp": 1720814172598, - "elapsed": 200.483, - "heap": 722560200, - "nonHeap": 24516064, - "cpuLoad": 0.03349706958025372, - "cpuTime": 352790000000 - } - ] - }, - { - "benchName": "LightDB SQLiteStore - Previous", - "name": "Search All Records", - "maxProgress": 1.0E+7, - "size": 1397116928, - "logs": [ - { - "progress": 0, - "timeStamp": 1720814172602, - "elapsed": 0.0, - "heap": 739337416, - "nonHeap": 24518640, - "cpuLoad": 0.0, - "cpuTime": 352790000000 - }, - 
{ - "progress": 10000000, - "timeStamp": 1720814177427, - "elapsed": 4.825, - "heap": 6024160456, - "nonHeap": 24548288, - "cpuLoad": 0.03128651174866935, - "cpuTime": 357610000000 - } - ] - }, - { - "benchName": "LightDB SQLiteStore - Previous", - "name": "Search All Records Multi", - "maxProgress": 1.0E+7, - "size": 1397116928, - "logs": [ - { - "progress": 0, - "timeStamp": 1720814177428, - "elapsed": 0.0, - "heap": 6040937672, - "nonHeap": 24551240, - "cpuLoad": 0.3333333333333333, - "cpuTime": 357620000000 - }, - { - "progress": 62600278, - "timeStamp": 1720814207429, - "elapsed": 30.001, - "heap": 7243424616, - "nonHeap": 24580144, - "cpuLoad": 0.031347471860297724, - "cpuTime": 387670000000 - }, - { - "progress": 80000000, - "timeStamp": 1720814215875, - "elapsed": 38.447, - "heap": 481877480, - "nonHeap": 24581168, - "cpuLoad": 0.03145424836601307, - "cpuTime": 396140000000 - } - ] - } -] \ No newline at end of file diff --git a/benchmark/src/main/scala/benchmark/FlushingBacklog.scala b/benchmark/src/main/scala/benchmark/FlushingBacklog.scala index 2d7bd98d..8c0055be 100644 --- a/benchmark/src/main/scala/benchmark/FlushingBacklog.scala +++ b/benchmark/src/main/scala/benchmark/FlushingBacklog.scala @@ -1,6 +1,7 @@ package benchmark import cats.effect.IO +import rapid._ import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} @@ -36,7 +37,7 @@ abstract class FlushingBacklog[Key, Value](val batchSize: Int, val maxSize: Int) } private def waitForBuffer(): Task[Unit] = if (size.get() > maxSize) { - IO.sleep(1.second).flatMap(_ => waitForBuffer()) + Task.sleep(1.second).flatMap(_ => waitForBuffer()) } else { Task.unit } @@ -50,8 +51,8 @@ abstract class FlushingBacklog[Key, Value](val batchSize: Int, val maxSize: Int) } } - private def pollingStream: rapid.Stream[Value] = fs2.Stream - .fromBlockingIterator[IO](map.keys().asIterator().asScala, 512) + private def pollingStream: rapid.Stream[Value] = rapid.Stream + .fromIterator(Task(map.keys().asIterator().asScala)) .map { key => val o = Option(map.remove(key)) if (o.nonEmpty) { @@ -66,7 +67,6 @@ abstract class FlushingBacklog[Key, Value](val batchSize: Int, val maxSize: Int) .unNone private def prepareWrite(): Task[Unit] = pollingStream - .compile .toList .flatMap { list => writeBatched(list) diff --git a/benchmark/src/main/scala/benchmark/IOIterator.scala b/benchmark/src/main/scala/benchmark/IOIterator.scala index 4f7b9416..20d8bf38 100644 --- a/benchmark/src/main/scala/benchmark/IOIterator.scala +++ b/benchmark/src/main/scala/benchmark/IOIterator.scala @@ -1,10 +1,9 @@ package benchmark -import cats.effect.IO import cats.implicits._ +import rapid.Task import java.util.concurrent.atomic.AtomicInteger -import scala.concurrent.{ExecutionContext, Future} trait IOIterator[T] { val running = new AtomicInteger(0) @@ -16,7 +15,7 @@ trait IOIterator[T] { running.incrementAndGet() recursiveStream(f) } - ios.sequence.map(_ => ()) + ios.tasks.map(_ => ()) } private def recursiveStream(f: T => Task[Unit]): Task[Unit] = next().flatMap { diff --git a/benchmark/src/main/scala/benchmark/bench/ReportGenerator.scala b/benchmark/src/main/scala/benchmark/bench/ReportGenerator.scala index 54485e55..1ce92179 100644 --- a/benchmark/src/main/scala/benchmark/bench/ReportGenerator.scala +++ b/benchmark/src/main/scala/benchmark/bench/ReportGenerator.scala @@ -4,6 +4,7 @@ import fabric.io.{JsonFormatter, JsonParser} import fabric.rw._ import io.quickchart.QuickChart import org.apache.commons.io.FileUtils +import 
+import perfolation.double2Implicits
 
 import java.io.File
 
@@ -79,6 +80,11 @@
         ))
       ))
     )
+    /*scribe.info(s"$name Total Time:")
+    nameAndReports.foreach {
+      case (name, reports) => scribe.info(s"\t$name: ${reports.logs.last.elapsed.f(f = 3)} seconds")
+    }
+    scribe.info("-------------------------")*/
     quickChart.setConfig(chart.config)
     quickChart.toFile(s"${outputDirectory.getName}/$name-total-time.png")
   }
diff --git a/benchmark/src/main/scala/benchmark/bench/Runner.scala b/benchmark/src/main/scala/benchmark/bench/Runner.scala
index 4a632eb9..6bf959e5 100644
--- a/benchmark/src/main/scala/benchmark/bench/Runner.scala
+++ b/benchmark/src/main/scala/benchmark/bench/Runner.scala
@@ -1,6 +1,6 @@
 package benchmark.bench
 
-import benchmark.bench.impl.{DerbyBench, H2Bench, LightDBBench, MongoDBBench, PostgreSQLBench, SQLiteBench}
+import benchmark.bench.impl.{DerbyBench, H2Bench, LightDBAsyncBench, LightDBBench, MongoDBBench, PostgreSQLBench, SQLiteBench}
 import fabric.io.JsonFormatter
 import fabric.rw._
 import lightdb.h2.H2Store
@@ -29,10 +29,11 @@
     "LightDB-Map-SQLite" -> LightDBBench(SplitStoreManager(MapStore, SQLiteStore)),
     "LightDB-HaloDB-SQLite" -> LightDBBench(SplitStoreManager(HaloDBStore, SQLiteStore)),
     "LightDB-Lucene" -> LightDBBench(LuceneStore),
-    "LightDB-HaloDB-Lucene" -> LightDBBench(SplitStoreManager(HaloDBStore, LuceneStore, searchingMode = StoreMode.Indexes)),
-    "LightDB-RocksDB-Lucene" -> LightDBBench(SplitStoreManager(RocksDBStore, LuceneStore, searchingMode = StoreMode.Indexes)),
+    "LightDB-HaloDB-Lucene" -> LightDBBench(SplitStoreManager(HaloDBStore, LuceneStore)),
+    "LightDB-RocksDB-Lucene" -> LightDBBench(SplitStoreManager(RocksDBStore, LuceneStore)),
     "LightDB-H2" -> LightDBBench(H2Store),
-    "LightDB-HaloDB-H2" -> LightDBBench(SplitStoreManager(HaloDBStore, H2Store, searchingMode = StoreMode.Indexes)),
+    "LightDB-HaloDB-H2" -> LightDBBench(SplitStoreManager(HaloDBStore, H2Store)),
+    "LightDB-Async-HaloDB-Lucene" -> LightDBAsyncBench(SplitStoreManager(HaloDBStore, LuceneStore)),
 //    "LightDB-PostgreSQL" -> LightDBBench(PostgreSQLStoreManager(HikariConnectionManager(SQLConfig(
 //      jdbcUrl = s"jdbc:postgresql://localhost:5432/basic",
 //      username = Some("postgres"),
diff --git a/benchmark/src/main/scala/benchmark/bench/impl/LightDBAsyncBench.scala b/benchmark/src/main/scala/benchmark/bench/impl/LightDBAsyncBench.scala
new file mode 100644
index 00000000..0ee70645
--- /dev/null
+++ b/benchmark/src/main/scala/benchmark/bench/impl/LightDBAsyncBench.scala
@@ -0,0 +1,98 @@
+package benchmark.bench.impl
+
+import benchmark.bench.Bench
+import fabric.rw.RW
+import lightdb.Id
+import lightdb.async.{AsyncCollection, AsyncDatabaseUpgrade, AsyncLightDB}
+import lightdb.collection.Collection
+import lightdb.store.StoreManager
+import lightdb.doc.{Document, DocumentModel, JsonConversion}
+import lightdb.sql.SQLConversion
+import rapid.Task
+
+import java.nio.file.Path
+import java.sql.ResultSet
+
+case class LightDBAsyncBench(storeManager: StoreManager) extends Bench { bench =>
+  override def name: String = s"LightDB Async ${storeManager.name}"
+
+  override def init(): Unit = DB.init().sync()
+
+  implicit def p2Person(p: P): Person = Person(p.name, p.age, Id(p.id))
+
+  def toP(person: Person): P = P(person.name, person.age, person._id.value)
+
+  override protected def insertRecords(iterator: Iterator[P]): Unit = DB.people.transaction { implicit transaction =>
+    rapid.Stream.fromIterator(Task(iterator)).evalMap { p =>
+      DB.people.insert(p)
+    }.drain
+  }.sync()
+
+  override protected def streamRecords(f: Iterator[P] => Unit): Unit = DB.people.transaction { implicit transaction =>
+    Task(f(DB.people.underlying.iterator.map(toP)))
+  }.sync()
+
+  override protected def getEachRecord(idIterator: Iterator[String]): Unit = DB.people.transaction { implicit transaction =>
+    rapid.Stream.fromIterator(Task(idIterator))
+      .evalMap { idString =>
+        val id = Person.id(idString)
+        DB.people.get(id).map {
+          case Some(person) =>
+            if (person._id.value != idString) {
+              scribe.warn(s"${person._id.value} was not $id")
+            }
+          case None => scribe.warn(s"$id was not found")
+        }
+      }
+      .drain
+  }.sync()
+
+  override protected def searchEachRecord(ageIterator: Iterator[Int]): Unit = DB.people.transaction { implicit transaction =>
+    rapid.Stream.fromIterator(Task(ageIterator))
+      .evalMap { age =>
+        DB.people.query.filter(_.age === age).one.map { person =>
+          if (person.age != age) {
+            scribe.warn(s"${person.age} was not $age")
+          }
+        }
+      }
+      .drain
+  }.sync()
+
+  override protected def searchAllRecords(f: Iterator[P] => Unit): Unit = DB.people.transaction { implicit transaction =>
+    Task {
+      val iterator = DB.people.underlying.query.search.docs.iterator.map(toP)
+      f(iterator)
+    }
+  }.sync()
+
+  override def size(): Long = -1L
+
+  override def dispose(): Unit = DB.people.dispose().sync()
+
+  object DB extends AsyncLightDB {
+    Collection.CacheQueries = true
+
+    override def directory: Option[Path] = Some(Path.of(s"db/Async${storeManager.getClass.getSimpleName.replace("$", "")}"))
+
+    val people: AsyncCollection[Person, Person.type] = collection(Person)
+
+    override def storeManager: StoreManager = bench.storeManager
+    override def upgrades: List[AsyncDatabaseUpgrade] = Nil
+  }
+
+  case class Person(name: String, age: Int, _id: Id[Person] = Person.id()) extends Document[Person]
+
+  object Person extends DocumentModel[Person] with SQLConversion[Person] with JsonConversion[Person] {
+    override implicit val rw: RW[Person] = RW.gen
+
+    override def convertFromSQL(rs: ResultSet): Person = Person(
+      name = rs.getString("name"),
+      age = rs.getInt("age"),
+      _id = id(rs.getString("_id"))
+    )
+
+    val name: F[String] = field("name", _.name)
+    val age: I[Int] = field.index("age", _.age)
+  }
+}
diff --git a/benchmark/src/main/scala/benchmark/bench/impl/LightDBBench.scala b/benchmark/src/main/scala/benchmark/bench/impl/LightDBBench.scala
index f32b17a5..aef6fcd3 100644
--- a/benchmark/src/main/scala/benchmark/bench/impl/LightDBBench.scala
+++ b/benchmark/src/main/scala/benchmark/bench/impl/LightDBBench.scala
@@ -74,9 +74,11 @@ case class LightDBBench(storeManager: StoreManager) extends Bench { bench =>
   override def dispose(): Unit = DB.people.dispose()
 
   object DB extends LightDB {
+    Collection.CacheQueries = true
+
     override lazy val directory: Option[Path] = Some(Path.of(s"db/${storeManager.getClass.getSimpleName.replace("$", "")}"))
 
-    val people: Collection[Person, Person.type] = collection(Person, cacheQueries = true)
+    val people: Collection[Person, Person.type] = collection(Person)
 
     override def storeManager: StoreManager = bench.storeManager
     override def upgrades: List[DatabaseUpgrade] = Nil
diff --git a/benchmark/src/main/scala/benchmark/imdb/BenchmarkImplementation.scala b/benchmark/src/main/scala/benchmark/imdb/BenchmarkImplementation.scala
index f34383a4..98c7249f 100644
--- a/benchmark/src/main/scala/benchmark/imdb/BenchmarkImplementation.scala
+++ b/benchmark/src/main/scala/benchmark/imdb/BenchmarkImplementation.scala
@@ -1,6 +1,6 @@
 package benchmark.imdb
 
-import cats.effect.IO
+import rapid.Task
 
 trait BenchmarkImplementation {
   type TitleAka
diff --git a/benchmark/src/main/scala/benchmark/imdb/MariaDBImplementation.scala b/benchmark/src/main/scala/benchmark/imdb/MariaDBImplementation.scala
index 94f75b28..32f1cb17 100644
--- a/benchmark/src/main/scala/benchmark/imdb/MariaDBImplementation.scala
+++ b/benchmark/src/main/scala/benchmark/imdb/MariaDBImplementation.scala
@@ -3,6 +3,7 @@ package benchmark.imdb
 import benchmark.FlushingBacklog
 import cats.effect.IO
 import cats.effect.unsafe.IORuntime
+import rapid.Task
 
 import java.sql.{Connection, DriverManager, ResultSet}
 
@@ -127,7 +128,7 @@
           None
         }
       }
-      fs2.Stream.fromBlockingIterator[IO](iterator, 512)
+      rapid.Stream.fromIterator(Task(iterator))
     } finally {
       s.closeOnCompletion()
     }
@@ -174,7 +175,7 @@
 
   override def flush(): Task[Unit] = for {
     _ <- backlogAka.flush()
-    _ <- IO(commit())
+    _ <- Task(commit())
   } yield {
     ()
   }
diff --git a/benchmark/src/main/scala/benchmark/imdb/MongoDBImplementation.scala b/benchmark/src/main/scala/benchmark/imdb/MongoDBImplementation.scala
index e7bbe69b..69f2646c 100644
--- a/benchmark/src/main/scala/benchmark/imdb/MongoDBImplementation.scala
+++ b/benchmark/src/main/scala/benchmark/imdb/MongoDBImplementation.scala
@@ -7,6 +7,7 @@ import com.mongodb.client.MongoClients
 import com.mongodb.client.model.Indexes
 import lightdb.Unique
 import org.bson.Document
+import rapid.Task
 
 import java.{lang, util}
 import scala.jdk.CollectionConverters._
@@ -81,7 +82,7 @@
 
   override def streamTitleAka(): rapid.Stream[Document] = {
     val iterator: Iterator[Document] = titleAka.find().iterator().asScala
-    fs2.Stream.fromBlockingIterator[IO](iterator, 512)
+    rapid.Stream.fromIterator(Task(iterator))
   }
 
   override def idFor(t: Document): String = t.getString("_id")
diff --git a/benchmark/src/main/scala/benchmark/imdb/PostgresImplementation.scala b/benchmark/src/main/scala/benchmark/imdb/PostgresImplementation.scala
index ccdc9094..55203780 100644
--- a/benchmark/src/main/scala/benchmark/imdb/PostgresImplementation.scala
+++ b/benchmark/src/main/scala/benchmark/imdb/PostgresImplementation.scala
@@ -3,6 +3,7 @@ package benchmark.imdb
 import benchmark.FlushingBacklog
 import cats.effect.IO
 import cats.effect.unsafe.IORuntime
+import rapid.Task
 
 import java.sql.{Connection, DriverManager, ResultSet}
 
@@ -127,7 +128,7 @@
           None
         }
       }
-      fs2.Stream.fromBlockingIterator[IO](iterator, 512)
+      rapid.Stream.fromIterator(Task(iterator))
     } finally {
       s.closeOnCompletion()
     }
@@ -174,7 +175,7 @@
 
   override def flush(): Task[Unit] = for {
     _ <- backlogAka.flush()
-    _ <- IO(commit())
+    _ <- Task(commit())
   } yield {
     ()
   }
diff --git a/benchmark/src/main/scala/benchmark/imdb/SQLiteImplementation.scala b/benchmark/src/main/scala/benchmark/imdb/SQLiteImplementation.scala
index 1020578d..e009cacf 100644
--- a/benchmark/src/main/scala/benchmark/imdb/SQLiteImplementation.scala
+++ b/benchmark/src/main/scala/benchmark/imdb/SQLiteImplementation.scala
@@ -4,6 +4,7 @@ import benchmark.FlushingBacklog
 import cats.effect.IO
 import cats.effect.unsafe.IORuntime
 import lightdb.Unique
+import rapid.Task
 
 import java.sql.{Connection, DriverManager, ResultSet}
 
@@ -128,7 +129,7 @@
           None
         }
       }
-      fs2.Stream.fromBlockingIterator[IO](iterator, 512)
+      rapid.Stream.fromIterator(Task(iterator))
     } finally {
       s.closeOnCompletion()
     }
@@ -175,7 +176,7 @@
 
   override def flush(): Task[Unit] = for {
     _ <- backlogAka.flush()
-    _ <- IO(commit())
+    _ <- Task(commit())
   } yield {
     ()
   }
diff --git a/benchmark/src/main/scala/benchmark/imdb/ScarangoImplementation.scala b/benchmark/src/main/scala/benchmark/imdb/ScarangoImplementation.scala
index 4c7e45ea..ac485e21 100644
--- a/benchmark/src/main/scala/benchmark/imdb/ScarangoImplementation.scala
+++ b/benchmark/src/main/scala/benchmark/imdb/ScarangoImplementation.scala
@@ -1,127 +1,128 @@
-package benchmark.imdb
-
-import benchmark.FlushingBacklog
-import cats.effect.IO
-import com.outr.arango.collection.DocumentCollection
-import com.outr.arango.query._
-import com.outr.arango.query.dsl.ref2Wrapped
-import com.outr.arango.{Document, DocumentModel, Field, Graph, Id, Index}
-import fabric.rw._
-
-object ScarangoImplementation extends BenchmarkImplementation {
-  override type TitleAka = TitleAkaADB
-  override type TitleBasics = TitleBasicsADB
-
-  private lazy val backlogAka = new FlushingBacklog[Id[TitleAkaADB], TitleAkaADB](1000, 10000) {
-    override protected def write(list: List[TitleAkaADB]): Task[Unit] = db.titleAka.batch.insert(list).map(_ => ())
-  }
-
-  private lazy val backlogBasics = new FlushingBacklog[Id[TitleBasicsADB], TitleBasicsADB](1000, 10000) {
-    override protected def write(list: List[TitleBasicsADB]): Task[Unit] =
-      db.titleBasics.batch.insert(list).map(_ => ())
-  }
-
-  override def name: String = "Scarango"
-
-  override def init(): Task[Unit] = db.init()
-
-  override def map2TitleAka(map: Map[String, String]): TitleAkaADB = {
-    val title = map.value("title")
-    val attributes = map.list("attributes")
-    TitleAkaADB(
-      titleId = map.value("titleId"),
-      ordering = map.int("ordering"),
-      title = title,
-      region = map.option("region"),
-      language = map.option("language"),
-      types = map.list("types"),
-      attributes = attributes,
-      isOriginalTitle = map.boolOption("isOriginalTitle")
-    )
-  }
-
-  override def map2TitleBasics(map: Map[String, String]): TitleBasicsADB = TitleBasicsADB(
-    tconst = map.value("tconst"),
-    titleType = map.value("titleType"),
-    primaryTitle = map.value("primaryTitle"),
-    originalTitle = map.value("originalTitle"),
-    isAdult = map.bool("isAdult"),
-    startYear = map.int("startYear"),
-    endYear = map.int("endYear"),
-    runtimeMinutes = map.int("runtimeMinutes"),
-    genres = map.list("genres")
-  )
-
-  override def persistTitleAka(t: TitleAkaADB): Task[Unit] = backlogAka.enqueue(t._id, t).map(_ => ())
-
-  override def persistTitleBasics(t: TitleBasicsADB): Task[Unit] = backlogBasics.enqueue(t._id, t).map(_ => ())
-
-  override def flush(): Task[Unit] = for {
-    _ <- backlogAka.flush()
-    _ <- backlogBasics.flush()
-  } yield {
-    ()
-  }
-
-  override def idFor(t: TitleAkaADB): String = t._id.value
-
-  override def titleIdFor(t: TitleAkaADB): String = t.titleId
-
-  override def streamTitleAka(): rapid.Stream[TitleAkaADB] = db.titleAka.query.stream()
-
-  override def verifyTitleAka(): Task[Unit] = db.titleAka
-    .query(aql"FOR d IN titleAka COLLECT WITH COUNT INTO length RETURN length")
-    .as[Int]
-    .one
-    .map { count =>
-      scribe.info(s"TitleAka counts -- $count")
-    }
-
-  override def verifyTitleBasics(): Task[Unit] = db.titleAka
-    .query(aql"FOR d IN titleBasics COLLECT WITH COUNT INTO length RETURN length")
-    .as[Int]
-    .one
-    .map { count =>
-- $count") - } - - override def get(id: String): Task[TitleAkaADB] = db.titleAka(TitleAkaADB.id(id)) - - override def findByTitleId(titleId: String): Task[List[TitleAkaADB]] = db.titleAka - .query - .byFilter(ref => ref.titleId === titleId) - .toList - - object db extends Graph("imdb") { - val titleAka: DocumentCollection[TitleAkaADB, TitleAkaADB.type] = vertex(TitleAkaADB) - val titleBasics: DocumentCollection[TitleBasicsADB, TitleBasicsADB.type] = vertex(TitleBasicsADB) - } - - case class TitleAkaADB(titleId: String, ordering: Int, title: String, region: Option[String], language: Option[String], types: List[String], attributes: List[String], isOriginalTitle: Option[Boolean], _id: Id[TitleAkaADB] = TitleAkaADB.id()) extends Document[TitleAkaADB] - - object TitleAkaADB extends DocumentModel[TitleAkaADB] { - override implicit val rw: RW[TitleAkaADB] = RW.gen - - val titleId: Field[String] = field("titleId") - val ordering: Field[Int] = field("ordering") - val title: Field[String] = field("title") - - override def indexes: List[Index] = List(titleId.index.persistent()) - - override val collectionName: String = "titleAka" - } - - case class TitleBasicsADB(tconst: String, titleType: String, primaryTitle: String, originalTitle: String, isAdult: Boolean, startYear: Int, endYear: Int, runtimeMinutes: Int, genres: List[String], _id: Id[TitleBasicsADB] = TitleBasicsADB.id()) extends Document[TitleBasicsADB] - - object TitleBasicsADB extends DocumentModel[TitleBasicsADB] { - override implicit val rw: RW[TitleBasicsADB] = RW.gen - - val tconst: Field[String] = field("tconst") - val primaryTitle: Field[String] = field("primaryTitle") - val originalTitle: Field[String] = field("originalTitle") - - override def indexes: List[Index] = Nil - - override val collectionName: String = "titleBasics" - } -} \ No newline at end of file +//package benchmark.imdb +// +//import benchmark.FlushingBacklog +//import cats.effect.IO +//import com.outr.arango.collection.DocumentCollection +//import com.outr.arango.query._ +//import com.outr.arango.query.dsl.ref2Wrapped +//import com.outr.arango.{Document, DocumentModel, Field, Graph, Id, Index} +//import fabric.rw._ +//import rapid.Task +// +//object ScarangoImplementation extends BenchmarkImplementation { +// override type TitleAka = TitleAkaADB +// override type TitleBasics = TitleBasicsADB +// +// private lazy val backlogAka = new FlushingBacklog[Id[TitleAkaADB], TitleAkaADB](1000, 10000) { +// override protected def write(list: List[TitleAkaADB]): Task[Unit] = db.titleAka.batch.insert(list).map(_ => ()) +// } +// +// private lazy val backlogBasics = new FlushingBacklog[Id[TitleBasicsADB], TitleBasicsADB](1000, 10000) { +// override protected def write(list: List[TitleBasicsADB]): Task[Unit] = +// db.titleBasics.batch.insert(list).map(_ => ()) +// } +// +// override def name: String = "Scarango" +// +// override def init(): Task[Unit] = db.init() +// +// override def map2TitleAka(map: Map[String, String]): TitleAkaADB = { +// val title = map.value("title") +// val attributes = map.list("attributes") +// TitleAkaADB( +// titleId = map.value("titleId"), +// ordering = map.int("ordering"), +// title = title, +// region = map.option("region"), +// language = map.option("language"), +// types = map.list("types"), +// attributes = attributes, +// isOriginalTitle = map.boolOption("isOriginalTitle") +// ) +// } +// +// override def map2TitleBasics(map: Map[String, String]): TitleBasicsADB = TitleBasicsADB( +// tconst = map.value("tconst"), +// titleType = 
map.value("titleType"), +// primaryTitle = map.value("primaryTitle"), +// originalTitle = map.value("originalTitle"), +// isAdult = map.bool("isAdult"), +// startYear = map.int("startYear"), +// endYear = map.int("endYear"), +// runtimeMinutes = map.int("runtimeMinutes"), +// genres = map.list("genres") +// ) +// +// override def persistTitleAka(t: TitleAkaADB): Task[Unit] = backlogAka.enqueue(t._id, t).map(_ => ()) +// +// override def persistTitleBasics(t: TitleBasicsADB): Task[Unit] = backlogBasics.enqueue(t._id, t).map(_ => ()) +// +// override def flush(): Task[Unit] = for { +// _ <- backlogAka.flush() +// _ <- backlogBasics.flush() +// } yield { +// () +// } +// +// override def idFor(t: TitleAkaADB): String = t._id.value +// +// override def titleIdFor(t: TitleAkaADB): String = t.titleId +// +// override def streamTitleAka(): rapid.Stream[TitleAkaADB] = db.titleAka.query.stream() +// +// override def verifyTitleAka(): Task[Unit] = db.titleAka +// .query(aql"FOR d IN titleAka COLLECT WITH COUNT INTO length RETURN length") +// .as[Int] +// .one +// .map { count => +// scribe.info(s"TitleAka counts -- $count") +// } +// +// override def verifyTitleBasics(): Task[Unit] = db.titleAka +// .query(aql"FOR d IN titleBasics COLLECT WITH COUNT INTO length RETURN length") +// .as[Int] +// .one +// .map { count => +// scribe.info(s"TitleBasics counts -- $count") +// } +// +// override def get(id: String): Task[TitleAkaADB] = db.titleAka(TitleAkaADB.id(id)) +// +// override def findByTitleId(titleId: String): Task[List[TitleAkaADB]] = db.titleAka +// .query +// .byFilter(ref => ref.titleId === titleId) +// .toList +// +// object db extends Graph("imdb") { +// val titleAka: DocumentCollection[TitleAkaADB, TitleAkaADB.type] = vertex(TitleAkaADB) +// val titleBasics: DocumentCollection[TitleBasicsADB, TitleBasicsADB.type] = vertex(TitleBasicsADB) +// } +// +// case class TitleAkaADB(titleId: String, ordering: Int, title: String, region: Option[String], language: Option[String], types: List[String], attributes: List[String], isOriginalTitle: Option[Boolean], _id: Id[TitleAkaADB] = TitleAkaADB.id()) extends Document[TitleAkaADB] +// +// object TitleAkaADB extends DocumentModel[TitleAkaADB] { +// override implicit val rw: RW[TitleAkaADB] = RW.gen +// +// val titleId: Field[String] = field("titleId") +// val ordering: Field[Int] = field("ordering") +// val title: Field[String] = field("title") +// +// override def indexes: List[Index] = List(titleId.index.persistent()) +// +// override val collectionName: String = "titleAka" +// } +// +// case class TitleBasicsADB(tconst: String, titleType: String, primaryTitle: String, originalTitle: String, isAdult: Boolean, startYear: Int, endYear: Int, runtimeMinutes: Int, genres: List[String], _id: Id[TitleBasicsADB] = TitleBasicsADB.id()) extends Document[TitleBasicsADB] +// +// object TitleBasicsADB extends DocumentModel[TitleBasicsADB] { +// override implicit val rw: RW[TitleBasicsADB] = RW.gen +// +// val tconst: Field[String] = field("tconst") +// val primaryTitle: Field[String] = field("primaryTitle") +// val originalTitle: Field[String] = field("originalTitle") +// +// override def indexes: List[Index] = Nil +// +// override val collectionName: String = "titleBasics" +// } +//} \ No newline at end of file diff --git a/build.sbt b/build.sbt index e7b5c9c4..46c8e2b4 100644 --- a/build.sbt +++ b/build.sbt @@ -16,7 +16,7 @@ val developerURL: String = "https://matthicks.com" name := projectName ThisBuild / organization := org ThisBuild / version := 
"1.3.0-SNAPSHOT" -ThisBuild / scalaVersion := scala3 +ThisBuild / scalaVersion := scala213 ThisBuild / crossScalaVersions := allScalaVersions ThisBuild / scalacOptions ++= Seq("-unchecked", "-deprecation") @@ -82,7 +82,7 @@ val h2Version: String = "2.3.232" val postgresqlVersion: String = "42.7.3" -val rapidVersion: String = "0.3.2" +val rapidVersion: String = "0.3.3-SNAPSHOT" val scalaTestVersion: String = "3.2.19" diff --git a/run_benchmarks.sh b/run_benchmarks.sh index 1fbb29bb..142446ef 100755 --- a/run_benchmarks.sh +++ b/run_benchmarks.sh @@ -6,7 +6,7 @@ set -e #declare -a arr=("LightDB-SQLite" "LightDB-Map-SQLite" "LightDB-HaloDB-SQLite" "LightDB-Lucene" "LightDB-HaloDB-Lucene" "LightDB-H2" "LightDB-HaloDB-H2") #declare -a arr=("LightDB-Lucene" "LightDB-HaloDB-Lucene") #declare -a arr=("SQLite" "PostgreSQL" "H2" "Derby" "MongoDB" "LightDB-SQLite" "LightDB-Map-SQLite" "LightDB-HaloDB-SQLite" "LightDB-Lucene" "LightDB-HaloDB-Lucene" "LightDB-RocksDB-Lucene" "LightDB-H2" "LightDB-HaloDB-H2") -declare -a arr=("LightDB-H2" "LightDB-HaloDB-H2") +declare -a arr=("LightDB-HaloDB-Lucene") for i in "${arr[@]}" do