From ba3dfbf08eb3970d79470d1240a407252d192abe Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 5 Feb 2024 10:33:13 +0100 Subject: [PATCH] feat: Data partitions (#508) * H2Dialect create tables * verify slice range within data partition * ci job with 4 data partitions * partition over multiple databases * all persistenceIds queries * retrieve from each data partition and combine the results * mention min number of projection instances * keep journalTableWithSchema for compatibility * mima filter * and utility to retrieve connection factory names --- .github/workflows/build-test.yml | 35 +++ .../data-partition.excludes | 3 + core/src/main/resources/reference.conf | 32 +++ .../persistence/r2dbc/R2dbcSettings.scala | 176 ++++++++++++-- .../scaladsl/DurableStateCleanup.scala | 12 +- .../scaladsl/EventSourcedCleanup.scala | 15 +- .../r2dbc/internal/BySliceQuery.scala | 4 +- .../persistence/r2dbc/internal/Dialect.scala | 8 +- .../r2dbc/internal/R2dbcExecutor.scala | 34 +++ .../r2dbc/internal/h2/H2Dialect.scala | 48 ++-- .../r2dbc/internal/h2/H2DurableStateDao.scala | 6 +- .../r2dbc/internal/h2/H2JournalDao.scala | 21 +- .../r2dbc/internal/h2/H2QueryDao.scala | 9 +- .../r2dbc/internal/h2/H2SnapshotDao.scala | 6 +- .../internal/postgres/PostgresDialect.scala | 18 +- .../postgres/PostgresDurableStateDao.scala | 13 +- .../postgres/PostgresJournalDao.scala | 148 ++++++------ .../internal/postgres/PostgresQueryDao.scala | 178 +++++++++------ .../postgres/PostgresSnapshotDao.scala | 11 +- .../internal/postgres/YugabyteDialect.scala | 18 +- .../postgres/YugabyteDurableStateDao.scala | 6 +- .../internal/postgres/YugabyteQueryDao.scala | 6 +- .../postgres/YugabyteSnapshotDao.scala | 7 +- .../internal/sqlserver/SqlServerDialect.scala | 18 +- .../sqlserver/SqlServerDurableStateDao.scala | 8 +- .../sqlserver/SqlServerJournalDao.scala | 12 +- .../sqlserver/SqlServerQueryDao.scala | 36 +-- .../sqlserver/SqlServerSnapshotDao.scala | 8 +- .../r2dbc/journal/R2dbcJournal.scala | 19 +- .../query/javadsl/R2dbcReadJournal.scala | 5 + .../query/scaladsl/R2dbcReadJournal.scala | 23 +- .../r2dbc/session/scaladsl/R2dbcSession.scala | 1 + .../r2dbc/snapshot/R2dbcSnapshotStore.scala | 15 +- .../scaladsl/R2dbcDurableStateStore.scala | 19 +- .../application-postgres-data-partitions.conf | 12 + .../akka/persistence/r2dbc/PayloadSpec.scala | 5 +- .../persistence/r2dbc/R2dbcSettingsSpec.scala | 215 +++++++++++++++++- .../persistence/r2dbc/TestDbLifecycle.scala | 36 +-- .../scaladsl/EventSourcedCleanupSpec.scala | 24 +- .../H2AdditionalInitForSchemaSpec.scala | 3 + .../r2dbc/internal/R2dbcExecutorSpec.scala | 3 +- .../journal/PersistSerializedEventSpec.scala | 5 +- .../r2dbc/journal/PersistTagsSpec.scala | 28 ++- .../r2dbc/journal/PersistTimestampSpec.scala | 46 ++-- .../CurrentPersistenceIdsQuerySpec.scala | 1 + .../query/EventsBySliceBacktrackingSpec.scala | 10 +- .../r2dbc/query/EventsBySlicePerfSpec.scala | 2 + ...eStateUpdateWithChangeEventStoreSpec.scala | 53 +++-- ddl-scripts/create_tables_postgres_0-1.sql | 90 ++++++++ ddl-scripts/create_tables_postgres_2-3.sql | 90 ++++++++ docker/docker-compose-postgres-2.yml | 30 +++ .../r2dbc/migration/MigrationTool.scala | 22 +- .../r2dbc/migration/MigrationToolDao.scala | 18 +- 53 files changed, 1246 insertions(+), 425 deletions(-) create mode 100644 core/src/main/mima-filters/1.2.1.backwards.excludes/data-partition.excludes create mode 100644 core/src/test/resources/application-postgres-data-partitions.conf create mode 100644 ddl-scripts/create_tables_postgres_0-1.sql 
create mode 100644 ddl-scripts/create_tables_postgres_2-3.sql create mode 100644 docker/docker-compose-postgres-2.yml diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index ce4f705e..8eb285f7 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -112,6 +112,41 @@ jobs: -Dakka.persistence.r2dbc.state.payload-column-type=JSONB \ "core/testOnly akka.persistence.r2dbc.PayloadSpec" + test-postgres-data-partitions: + name: Run test with Postgres and several data partitions + runs-on: ubuntu-22.04 + if: github.repository == 'akka/akka-persistence-r2dbc' + steps: + - name: Checkout + uses: actions/checkout@v3.1.0 + with: + fetch-depth: 0 + + - name: Checkout GitHub merge + if: github.event.pull_request + run: |- + git fetch origin pull/${{ github.event.pull_request.number }}/merge:scratch + git checkout scratch + + - name: Cache Coursier cache + uses: coursier/cache-action@v6.4.0 + + - name: Set up JDK 11 + uses: coursier/setup-action@v1.3.0 + with: + jvm: temurin:1.11.0 + + - name: Start DB + run: |- + docker compose -f docker/docker-compose-postgres-2.yml up --wait + docker exec -i postgres-db-0 psql -U postgres -t < ddl-scripts/create_tables_postgres_0-1.sql + docker exec -i postgres-db-1 psql -U postgres -t < ddl-scripts/create_tables_postgres_2-3.sql + + - name: sbt test + run: |- + cp .jvmopts-ci .jvmopts + sbt -Dconfig.resource=application-postgres-data-partitions.conf test + test-yugabyte: name: Run tests with Yugabyte runs-on: ubuntu-22.04 diff --git a/core/src/main/mima-filters/1.2.1.backwards.excludes/data-partition.excludes b/core/src/main/mima-filters/1.2.1.backwards.excludes/data-partition.excludes new file mode 100644 index 00000000..cbf5198e --- /dev/null +++ b/core/src/main/mima-filters/1.2.1.backwards.excludes/data-partition.excludes @@ -0,0 +1,3 @@ +# internals +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.r2dbc.internal.BySliceQuery#Dao.currentDbTimestamp") +ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.persistence.r2dbc.internal.BySliceQuery#Dao.currentDbTimestamp") diff --git a/core/src/main/resources/reference.conf b/core/src/main/resources/reference.conf index 6e49e8d7..c52a6e33 100644 --- a/core/src/main/resources/reference.conf +++ b/core/src/main/resources/reference.conf @@ -51,6 +51,7 @@ akka.persistence.r2dbc { # replay filter not needed for this plugin replay-filter.mode = off + } } // #journal-settings @@ -182,6 +183,36 @@ akka.persistence.r2dbc { } } +// #data-partition-settings +# Number of tables and databases that the data will be split into. The selection of data +# partition is made from the slice of the persistenceId. +# For example, 4 data partitions means that slice range (0 to 255) maps to data partition 0, +# (256 to 511) to data partition 1, (512 to 767) to data partition 2, and (768 to 1023) to +# data partition 3. +# This configuration cannot be changed in a rolling update, since the data must be moved +# between the tables if number of data partitions is changed. +# The number of Projection instances when using eventsBySlices must be greater than or equal +# to the number of data partitions, because a query for a slice range cannot span over more +# than one data partition. +akka.persistence.r2dbc.data-partition { + # How many tables the data will be partitioned over. The tables will have + # the data partition as suffix, e.g. event_journal_0, event_journal_1. + # Must be between 1 and 1024 and a whole number divisor of 1024 (number of slices). 
+ # When number-of-partitions is 1 the table name is without suffix. + number-of-partitions = 1 + # How many databases the tables will be partitioned over. A database corresponds to a connection + # factory with its own connection pool. + # Must be a whole number divisor of number-of-partitions, and less than or equal to number-of-partitions. + # For example, number-of-partitions=8 and number-of-databases=2 means that there will be a total of + # 8 tables in 2 databases, i.e. 4 tables in each database. + # The connection-factory setting will have the data partition range as suffix, e.g. with 8 data partitions and + # 2 databases the connection factory settings are connection-factory-0-3, connection-factory-4-7. + # When number-of-databases is 1 there will only be one connection factory, without suffix. + # number-of-databases > 1 not supported by H2. + number-of-databases = 1 +} +// #data-partition-settings + // #connection-settings akka.persistence.r2dbc { @@ -354,6 +385,7 @@ akka.persistence.r2dbc { journal-table = ${akka.persistence.r2dbc.journal.table} state-table = ${akka.persistence.r2dbc.state.table} snapshot-table = ${akka.persistence.r2dbc.snapshot.table} + number-of-partitions = ${akka.persistence.r2dbc.data-partition.number-of-partitions} // #connection-settings-h2 } diff --git a/core/src/main/scala/akka/persistence/r2dbc/R2dbcSettings.scala b/core/src/main/scala/akka/persistence/r2dbc/R2dbcSettings.scala index 5f64b4fc..249a3b37 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/R2dbcSettings.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/R2dbcSettings.scala @@ -4,6 +4,7 @@ package akka.persistence.r2dbc +import scala.collection.immutable import akka.annotation.InternalApi import akka.annotation.InternalStableApi import akka.persistence.r2dbc.internal.codec.IdentityAdapter @@ -26,6 +27,9 @@ import scala.concurrent.duration._ @InternalStableApi object R2dbcSettings { + // must correspond to akka.persistence.Persistence.numberOfSlices + private val NumberOfSlices = 1024 + def apply(config: Config): R2dbcSettings = { if (config.hasPath("dialect")) { throw new IllegalArgumentException( @@ -34,13 +38,6 @@ object R2dbcSettings { "see akka-persistence-r2dbc documentation for details on the new configuration scheme: " + "https://doc.akka.io/docs/akka-persistence-r2dbc/current/migration-guide.html") } - if (!config.hasPath("connection-factory.dialect")) { - throw new IllegalArgumentException( - "The Akka Persistence R2DBC database config scheme has changed, the config needs to be updated " + - "to choose database dialect using the connection-factory block, " + - "see akka-persistence-r2dbc documentation for details on the new configuration scheme: " + - "https://doc.akka.io/docs/akka-persistence-r2dbc/current/migration-guide.html") - } val schema: Option[String] = Option(config.getString("schema")).filterNot(_.trim.isEmpty) @@ -77,7 +74,45 @@ object R2dbcSettings { val durableStateAssertSingleWriter: Boolean = config.getBoolean("state.assert-single-writer") - val connectionFactorySettings = ConnectionFactorySettings(config.getConfig("connection-factory")) + val numberOfDataPartitions = config.getInt("data-partition.number-of-partitions") + require( + 1 <= numberOfDataPartitions && numberOfDataPartitions <= NumberOfSlices, + s"data-partition.number-of-partitions [$numberOfDataPartitions] must be between 1 and $NumberOfSlices") + require( + numberOfDataPartitions * (NumberOfSlices / numberOfDataPartitions) == NumberOfSlices, + s"data-partition.number-of-partitions 
[$numberOfDataPartitions] must be a whole number divisor of " + + s"numberOfSlices [$NumberOfSlices].") + + val numberOfDatabases = config.getInt("data-partition.number-of-databases") + require( + 1 <= numberOfDatabases && numberOfDatabases <= numberOfDataPartitions, + s"data-partition.number-of-databases [$numberOfDatabases] must be between 1 and $numberOfDataPartitions") + require( + numberOfDatabases * (numberOfDataPartitions / numberOfDatabases) == numberOfDataPartitions, + s"data-partition.number-of-databases [$numberOfDatabases] must be a whole number divisor of " + + s"data-partition.number-of-partitions [$numberOfDataPartitions].") + + val connectionFactorySettings = + if (numberOfDatabases == 1) { + if (!config.hasPath("connection-factory.dialect")) { + throw new IllegalArgumentException( + "The Akka Persistence R2DBC database config scheme has changed, the config needs to be updated " + + "to choose database dialect using the connection-factory block, " + + "see akka-persistence-r2dbc documentation for details on the new configuration scheme: " + + "https://doc.akka.io/docs/akka-persistence-r2dbc/current/migration-guide.html") + } + Vector(ConnectionFactorySettings(config.getConfig("connection-factory"))) + } else { + val rangeSize = numberOfDataPartitions / numberOfDatabases + (0 until numberOfDatabases).map { i => + val configPropertyName = s"connection-factory-${i * rangeSize}-${i * rangeSize + rangeSize - 1}" + ConnectionFactorySettings(config.getConfig(configPropertyName)) + } + } + + require( + connectionFactorySettings.map(_.dialect.name).toSet.size == 1, + s"All dialects for the [${connectionFactorySettings.size}] database partitions must be the same.") val querySettings = new QuerySettings(config.getConfig("query")) @@ -99,13 +134,13 @@ object R2dbcSettings { val durableStatePayloadCodec: PayloadCodec = if (useJsonPayload("state")) PayloadCodec.JsonCodec else PayloadCodec.ByteArrayCodec - connectionFactorySettings.dialect.name match { + connectionFactorySettings.head.dialect.name match { case "sqlserver" => new CodecSettings( journalPayloadCodec, snapshotPayloadCodec, durableStatePayloadCodec, - tagsCodec = new TagsCodec.SqlServerTagsCodec(connectionFactorySettings.config), + tagsCodec = new TagsCodec.SqlServerTagsCodec(connectionFactorySettings.head.config), timestampCodec = TimestampCodec.SqlServerTimestampCodec, queryAdapter = SqlServerQueryAdapter) case "h2" => @@ -144,7 +179,8 @@ object R2dbcSettings { durableStateTableByEntityType, durableStateAdditionalColumnClasses, durableStateChangeHandlerClasses, - useAppTimestamp) + useAppTimestamp, + numberOfDataPartitions) // let the dialect trump settings that does not make sense for it settingsFromConfig.connectionFactorySettings.dialect.adaptSettings(settingsFromConfig) @@ -154,6 +190,24 @@ object R2dbcSettings { import akka.util.ccompat.JavaConverters._ cfg.root.unwrapped.asScala.toMap.map { case (k, v) => k -> v.toString } } + + /** + * The config paths for the connection factories that are used for the given number of data partitions and databases. 
+ */ + def connectionFactoryConfigPaths( + baseConfigPath: String, + numberOfDataPartitions: Int, + numberOfDatabases: Int): immutable.IndexedSeq[String] = { + if (numberOfDatabases == 1) { + Vector(baseConfigPath) + } else { + val rangeSize = numberOfDataPartitions / numberOfDatabases + (0 until numberOfDatabases).map { i => + s"$baseConfigPath-${i * rangeSize}-${i * rangeSize + rangeSize - 1}" + } + } + } + } /** @@ -173,20 +227,73 @@ final class R2dbcSettings private ( val cleanupSettings: CleanupSettings, /** INTERNAL API */ @InternalApi private[akka] val codecSettings: CodecSettings, - _connectionFactorySettings: ConnectionFactorySettings, + _connectionFactorySettings: immutable.IndexedSeq[ConnectionFactorySettings], _durableStateTableByEntityType: Map[String, String], _durableStateAdditionalColumnClasses: Map[String, immutable.IndexedSeq[String]], _durableStateChangeHandlerClasses: Map[String, String], - _useAppTimestamp: Boolean) { + _useAppTimestamp: Boolean, + val numberOfDataPartitions: Int) { + import R2dbcSettings.NumberOfSlices + /** + * The journal table and schema name without data partition suffix. + */ val journalTableWithSchema: String = schema.map(_ + ".").getOrElse("") + journalTable + + /** + * The journal table and schema name with data partition suffix for the given slice. When number-of-partitions is 1 + * the table name is without suffix. + */ + def journalTableWithSchema(slice: Int): String = { + if (numberOfDataPartitions == 1) + journalTableWithSchema + else + s"${journalTableWithSchema}_${dataPartition(slice)}" + } + val snapshotsTableWithSchema: String = schema.map(_ + ".").getOrElse("") + snapshotsTable val durableStateTableWithSchema: String = schema.map(_ + ".").getOrElse("") + durableStateTable + /** + * INTERNAL API: All journal tables and their the lower slice + */ + @InternalApi private[akka] val allJournalTablesWithSchema: Map[String, Int] = { + (0 until NumberOfSlices).foldLeft(Map.empty[String, Int]) { case (acc, slice) => + val table = journalTableWithSchema(slice) + if (acc.contains(table)) acc + else acc.updated(table, slice) + } + } + + val numberOfDatabases: Int = _connectionFactorySettings.size + + val dataPartitionSliceRanges: immutable.IndexedSeq[Range] = { + val rangeSize = NumberOfSlices / numberOfDataPartitions + (0 until numberOfDataPartitions).map { i => + (i * rangeSize until i * rangeSize + rangeSize) + }.toVector + } + + val connectionFactorSliceRanges: immutable.IndexedSeq[Range] = { + val rangeSize = NumberOfSlices / numberOfDatabases + (0 until numberOfDatabases).map { i => + (i * rangeSize until i * rangeSize + rangeSize) + }.toVector + } + + /** + * INTERNAL API + */ + @InternalApi private[akka] def isSliceRangeWithinSameDataPartition(minSlice: Int, maxSlice: Int): Boolean = + numberOfDataPartitions == 1 || dataPartition(minSlice) == dataPartition(maxSlice) + + private def dataPartition(slice: Int): Int = + slice / (NumberOfSlices / numberOfDataPartitions) + /** * One of the supported dialects 'postgres', 'yugabyte', 'sqlserver' or 'h2' */ - def dialectName: String = _connectionFactorySettings.dialect.name + def dialectName: String = connectionFactorySettings.dialect.name def getDurableStateTable(entityType: String): String = _durableStateTableByEntityType.getOrElse(entityType, durableStateTable) @@ -235,7 +342,30 @@ final class R2dbcSettings private ( /** * INTERNAL API */ - @InternalApi private[akka] def connectionFactorySettings: ConnectionFactorySettings = _connectionFactorySettings + @InternalApi private[akka] def 
connectionFactorySettings: ConnectionFactorySettings = + connectionFactorySettings(0) + + /** + * INTERNAL API + */ + @InternalApi private[akka] def connectionFactorySettings(slice: Int): ConnectionFactorySettings = { + val rangeSize = numberOfDataPartitions / numberOfDatabases + val i = dataPartition(slice) / rangeSize + _connectionFactorySettings(i) + } + + /** + * INTERNAL API + */ + @InternalApi private[akka] def resolveConnectionFactoryConfigPath(baseConfigPath: String, slice: Int): String = { + if (numberOfDatabases == 1) { + baseConfigPath + } else { + val rangeSize = numberOfDataPartitions / numberOfDatabases + val i = dataPartition(slice) / rangeSize + s"$baseConfigPath-${i * rangeSize}-${i * rangeSize + rangeSize - 1}" + } + } private def copy( schema: Option[String] = schema, @@ -249,12 +379,13 @@ final class R2dbcSettings private ( dbTimestampMonotonicIncreasing: Boolean = dbTimestampMonotonicIncreasing, cleanupSettings: CleanupSettings = cleanupSettings, codecSettings: CodecSettings = codecSettings, - connectionFactorySettings: ConnectionFactorySettings = connectionFactorySettings, + connectionFactorySettings: immutable.IndexedSeq[ConnectionFactorySettings] = _connectionFactorySettings, durableStateTableByEntityType: Map[String, String] = _durableStateTableByEntityType, durableStateAdditionalColumnClasses: Map[String, immutable.IndexedSeq[String]] = _durableStateAdditionalColumnClasses, durableStateChangeHandlerClasses: Map[String, String] = _durableStateChangeHandlerClasses, - useAppTimestamp: Boolean = _useAppTimestamp): R2dbcSettings = + useAppTimestamp: Boolean = _useAppTimestamp, + numberOfDataPartitions: Int = numberOfDataPartitions): R2dbcSettings = new R2dbcSettings( schema, journalTable, @@ -268,13 +399,14 @@ final class R2dbcSettings private ( cleanupSettings, codecSettings, connectionFactorySettings, - _durableStateTableByEntityType, - _durableStateAdditionalColumnClasses, - _durableStateChangeHandlerClasses, - useAppTimestamp) + durableStateTableByEntityType, + durableStateAdditionalColumnClasses, + durableStateChangeHandlerClasses, + useAppTimestamp, + numberOfDataPartitions) override def toString = - s"R2dbcSettings(dialectName=$dialectName, schema=$schema, journalTable=$journalTable, snapshotsTable=$snapshotsTable, durableStateTable=$durableStateTable, logDbCallsExceeding=$logDbCallsExceeding, dbTimestampMonotonicIncreasing=$dbTimestampMonotonicIncreasing, useAppTimestamp=$useAppTimestamp)" + s"R2dbcSettings(dialectName=$dialectName, schema=$schema, journalTable=$journalTable, snapshotsTable=$snapshotsTable, durableStateTable=$durableStateTable, logDbCallsExceeding=$logDbCallsExceeding, dbTimestampMonotonicIncreasing=$dbTimestampMonotonicIncreasing, useAppTimestamp=$useAppTimestamp, numberOfDataPartitions=$numberOfDataPartitions)" } /** diff --git a/core/src/main/scala/akka/persistence/r2dbc/cleanup/scaladsl/DurableStateCleanup.scala b/core/src/main/scala/akka/persistence/r2dbc/cleanup/scaladsl/DurableStateCleanup.scala index ab39e4e6..0572792f 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/cleanup/scaladsl/DurableStateCleanup.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/cleanup/scaladsl/DurableStateCleanup.scala @@ -9,6 +9,8 @@ import scala.concurrent.Future import scala.util.Failure import scala.util.Success +import org.slf4j.LoggerFactory + import akka.Done import akka.actor.ClassicActorSystemProvider import akka.actor.typed.ActorSystem @@ -16,10 +18,8 @@ import akka.actor.typed.scaladsl.LoggerOps import akka.annotation.ApiMayChange import 
akka.annotation.InternalApi import akka.dispatch.ExecutionContexts -import akka.persistence.r2dbc.ConnectionFactoryProvider import akka.persistence.r2dbc.R2dbcSettings -import akka.persistence.r2dbc.internal.DurableStateDao -import org.slf4j.LoggerFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider /** * Scala API: Tool for deleting durable state for a given list of `persistenceIds` without using `DurableStateBehavior` @@ -57,9 +57,9 @@ final class DurableStateCleanup(systemProvider: ClassicActorSystemProvider, conf private val sharedConfigPath = configPath.replaceAll("""\.cleanup$""", "") private val settings = R2dbcSettings(system.settings.config.getConfig(sharedConfigPath)) - private val connectionFactory = - ConnectionFactoryProvider(system).connectionFactoryFor(sharedConfigPath + ".connection-factory") - private val stateDao = settings.connectionFactorySettings.dialect.createDurableStateDao(settings, connectionFactory) + private val executorProvider = + new R2dbcExecutorProvider(settings, sharedConfigPath + ".connection-factory", LoggerFactory.getLogger(getClass)) + private val stateDao = settings.connectionFactorySettings.dialect.createDurableStateDao(settings, executorProvider) /** * Delete the state related to one single `persistenceId`. diff --git a/core/src/main/scala/akka/persistence/r2dbc/cleanup/scaladsl/EventSourcedCleanup.scala b/core/src/main/scala/akka/persistence/r2dbc/cleanup/scaladsl/EventSourcedCleanup.scala index 7e3932c2..9244ad1e 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/cleanup/scaladsl/EventSourcedCleanup.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/cleanup/scaladsl/EventSourcedCleanup.scala @@ -11,6 +11,8 @@ import scala.concurrent.Future import scala.util.Failure import scala.util.Success +import org.slf4j.LoggerFactory + import akka.Done import akka.actor.ClassicActorSystemProvider import akka.actor.typed.ActorSystem @@ -18,11 +20,8 @@ import akka.actor.typed.scaladsl.LoggerOps import akka.annotation.ApiMayChange import akka.annotation.InternalApi import akka.persistence.SnapshotSelectionCriteria -import akka.persistence.r2dbc.ConnectionFactoryProvider import akka.persistence.r2dbc.R2dbcSettings -import akka.persistence.r2dbc.internal.JournalDao -import akka.persistence.r2dbc.internal.SnapshotDao -import org.slf4j.LoggerFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider /** * Scala API: Tool for deleting all events and/or snapshots for a given list of `persistenceIds` without using @@ -60,10 +59,10 @@ final class EventSourcedCleanup(systemProvider: ClassicActorSystemProvider, conf private val sharedConfigPath = configPath.replaceAll("""\.cleanup$""", "") private val settings = R2dbcSettings(system.settings.config.getConfig(sharedConfigPath)) - private val connectionFactory = - ConnectionFactoryProvider(system).connectionFactoryFor(sharedConfigPath + ".connection-factory") - private val journalDao = settings.connectionFactorySettings.dialect.createJournalDao(settings, connectionFactory) - private val snapshotDao = settings.connectionFactorySettings.dialect.createSnapshotDao(settings, connectionFactory) + private val executorProvider = + new R2dbcExecutorProvider(settings, sharedConfigPath + ".connection-factory", LoggerFactory.getLogger(getClass)) + private val journalDao = settings.connectionFactorySettings.dialect.createJournalDao(settings, executorProvider) + private val snapshotDao = settings.connectionFactorySettings.dialect.createSnapshotDao(settings, executorProvider) /** * Delete all events 
before a sequenceNr for the given persistence id. Snapshots are not deleted. diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/BySliceQuery.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/BySliceQuery.scala index eaa88009..e5370e1c 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/BySliceQuery.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/BySliceQuery.scala @@ -150,7 +150,7 @@ import org.slf4j.Logger } trait Dao[SerializedRow] { - def currentDbTimestamp(): Future[Instant] + def currentDbTimestamp(slice: Int): Future[Instant] def rowsBySlices( entityType: String, @@ -262,7 +262,7 @@ import org.slf4j.Logger val currentTimestamp = if (settings.useAppTimestamp) Future.successful(InstantFactory.now()) - else dao.currentDbTimestamp() + else dao.currentDbTimestamp(minSlice) Source .futureSource[Envelope, NotUsed] { diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/Dialect.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/Dialect.scala index 6562ce42..0b2636d6 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/Dialect.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/Dialect.scala @@ -31,15 +31,15 @@ private[r2dbc] trait Dialect { def createConnectionFactory(config: Config): ConnectionFactory - def createJournalDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + def createJournalDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): JournalDao - def createQueryDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + def createQueryDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): QueryDao - def createSnapshotDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + def createSnapshotDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): SnapshotDao - def createDurableStateDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + def createDurableStateDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): DurableStateDao } diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/R2dbcExecutor.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/R2dbcExecutor.scala index e89126aa..537a370a 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/R2dbcExecutor.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/R2dbcExecutor.scala @@ -7,6 +7,7 @@ package akka.persistence.r2dbc.internal import java.util.function.BiConsumer import scala.collection.immutable +import scala.collection.immutable.IntMap import scala.collection.mutable import scala.compat.java8.FutureConverters._ import scala.concurrent.ExecutionContext @@ -34,6 +35,9 @@ import org.slf4j.Logger import reactor.core.publisher.Flux import reactor.core.publisher.Mono +import akka.persistence.r2dbc.ConnectionFactoryProvider +import akka.persistence.r2dbc.R2dbcSettings + /** * INTERNAL API */ @@ -383,3 +387,33 @@ class R2dbcExecutor( Future.successful(Done) } } + +/** + * INTERNAL API + */ +@InternalStableApi class R2dbcExecutorProvider( + val settings: R2dbcSettings, + connectionFactoryBaseConfigPath: String, + log: Logger)(implicit ec: ExecutionContext, system: ActorSystem[_]) { + private val connectionFactoryProvider = ConnectionFactoryProvider(system) + private var cache = IntMap.empty[R2dbcExecutor] + + def 
executorFor(slice: Int): R2dbcExecutor = { + cache.get(slice) match { + case Some(executor) => executor + case None => + val connectionFactoryConfigPath = + settings.resolveConnectionFactoryConfigPath(connectionFactoryBaseConfigPath, slice) + val connectionFactory = connectionFactoryProvider.connectionFactoryFor(connectionFactoryConfigPath) + val executor = new R2dbcExecutor( + connectionFactory, + log, + settings.logDbCallsExceeding, + settings.connectionFactorySettings.poolSettings.closeCallsExceeding) + // it's just a cache so no need for guarding concurrent updates or visibility + cache = cache.updated(slice, executor) + executor + } + } + +} diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2Dialect.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2Dialect.scala index 2cf19ef3..5764fb74 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2Dialect.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2Dialect.scala @@ -24,6 +24,7 @@ import java.util.Locale import scala.concurrent.ExecutionContext +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider import akka.persistence.r2dbc.internal.codec.IdentityAdapter import akka.persistence.r2dbc.internal.codec.QueryAdapter @@ -36,6 +37,8 @@ private[r2dbc] object H2Dialect extends Dialect { override def name: String = "h2" override def adaptSettings(settings: R2dbcSettings): R2dbcSettings = { + if (settings.numberOfDatabases > 1) + throw new IllegalArgumentException("H2 dialect doesn't support more than one data-partition.number-of-databases") val res = settings // app timestamp is db timestamp because same process .withUseAppTimestamp(true) @@ -84,21 +87,21 @@ private[r2dbc] object H2Dialect extends Dialect { new H2ConnectionFactory(h2Config) } - override def createJournalDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createJournalDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): JournalDao = - new H2JournalDao(settings, connectionFactory)(ecForDaos(system, settings), system) + new H2JournalDao(settings, executorProvider)(ecForDaos(system, settings), system) - override def createSnapshotDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createSnapshotDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): SnapshotDao = - new H2SnapshotDao(settings, connectionFactory)(ecForDaos(system, settings), system) + new H2SnapshotDao(settings, executorProvider)(ecForDaos(system, settings), system) - override def createQueryDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createQueryDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): QueryDao = - new H2QueryDao(settings, connectionFactory)(ecForDaos(system, settings), system) + new H2QueryDao(settings, executorProvider)(ecForDaos(system, settings), system) - override def createDurableStateDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createDurableStateDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): DurableStateDao = - new H2DurableStateDao(settings, connectionFactory, this)(ecForDaos(system, settings), system) + new H2DurableStateDao(settings, executorProvider, this)(ecForDaos(system, settings), system) private def ecForDaos(system: ActorSystem[_], settings: 
R2dbcSettings): ExecutionContext = { // H2 R2DBC driver blocks in surprising places (Mono.toFuture in stmt.execute().asFuture()) @@ -113,8 +116,16 @@ private[r2dbc] object H2Dialect extends Dialect { else Some(s) } val schema = optionalConfString("schema") + val numberOfDataPartitions = config.getInt("number-of-partitions") val journalTable = config.getString("journal-table") val journalTableWithSchema = schema.map(_ + ".").getOrElse("") + journalTable + val allJournalTablesWithSchema = + if (numberOfDataPartitions == 1) + Vector(journalTableWithSchema) + else + (0 until numberOfDataPartitions).map { dataPartition => + s"${journalTableWithSchema}_$dataPartition" + } val snapshotTable = config.getString("snapshot-table") val snapshotTableWithSchema = schema.map(_ + ".").getOrElse("") + snapshotTable val durableStateTable = config.getString("state-table") @@ -123,17 +134,20 @@ private[r2dbc] object H2Dialect extends Dialect { implicit val queryAdapter: QueryAdapter = IdentityAdapter val sliceIndexes = if (createSliceIndexes) { - val sliceIndexWithSchema = journalTableWithSchema + "_slice_idx" + val journalSliceIndexes = allJournalTablesWithSchema.map { table => + val sliceIndexWithSchema = table + "_slice_idx" + sql"""CREATE INDEX IF NOT EXISTS $sliceIndexWithSchema ON $table(slice, entity_type, db_timestamp, seq_nr)""" + } val snapshotSliceIndexWithSchema = snapshotTableWithSchema + "_slice_idx" val durableStateSliceIndexWithSchema = durableStateTableWithSchema + "_slice_idx" + journalSliceIndexes ++ Seq( - sql"""CREATE INDEX IF NOT EXISTS $sliceIndexWithSchema ON $journalTableWithSchema(slice, entity_type, db_timestamp, seq_nr)""", sql"""CREATE INDEX IF NOT EXISTS $snapshotSliceIndexWithSchema ON $snapshotTableWithSchema(slice, entity_type, db_timestamp)""", sql"""CREATE INDEX IF NOT EXISTS $durableStateSliceIndexWithSchema ON durable_state(slice, entity_type, db_timestamp, revision)""") } else Seq.empty[String] - (Seq( - sql"""CREATE TABLE IF NOT EXISTS $journalTableWithSchema ( + val createJournalTables = allJournalTablesWithSchema.map { table => + sql"""CREATE TABLE IF NOT EXISTS $table ( slice INT NOT NULL, entity_type VARCHAR(255) NOT NULL, persistence_id VARCHAR(255) NOT NULL, @@ -154,7 +168,11 @@ private[r2dbc] object H2Dialect extends Dialect { meta_payload BYTEA, PRIMARY KEY(persistence_id, seq_nr) - )""", + )""" + } + + (createJournalTables ++ + Seq( sql""" CREATE TABLE IF NOT EXISTS $snapshotTableWithSchema ( slice INT NOT NULL, @@ -188,7 +206,9 @@ private[r2dbc] object H2Dialect extends Dialect { PRIMARY KEY(persistence_id, revision) ) - """) ++ sliceIndexes ++ (if (additionalInit.trim.nonEmpty) Seq(additionalInit) else Seq.empty[String])) + """) ++ + sliceIndexes ++ + (if (additionalInit.trim.nonEmpty) Seq(additionalInit) else Seq.empty[String])) .mkString(";") // r2dbc h2 driver replaces with '\;' as needed for INIT } } diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2DurableStateDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2DurableStateDao.scala index 579401a4..e25d1382 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2DurableStateDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2DurableStateDao.scala @@ -8,7 +8,6 @@ import scala.concurrent.ExecutionContext import scala.concurrent.duration.Duration import scala.concurrent.duration.FiniteDuration -import io.r2dbc.spi.ConnectionFactory import org.slf4j.Logger import org.slf4j.LoggerFactory @@ -16,6 +15,7 @@ import 
akka.actor.typed.ActorSystem import akka.annotation.InternalApi import akka.persistence.r2dbc.R2dbcSettings import akka.persistence.r2dbc.internal.Dialect +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider import akka.persistence.r2dbc.internal.postgres.PostgresDurableStateDao /** @@ -24,9 +24,9 @@ import akka.persistence.r2dbc.internal.postgres.PostgresDurableStateDao @InternalApi private[r2dbc] final class H2DurableStateDao( settings: R2dbcSettings, - connectionFactory: ConnectionFactory, + executorProvider: R2dbcExecutorProvider, dialect: Dialect)(implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresDurableStateDao(settings, connectionFactory, dialect) { + extends PostgresDurableStateDao(settings, executorProvider, dialect) { override protected lazy val log: Logger = LoggerFactory.getLogger(classOf[H2DurableStateDao]) diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2JournalDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2JournalDao.scala index 98e55d53..cf577310 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2JournalDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2JournalDao.scala @@ -12,7 +12,6 @@ import akka.persistence.r2dbc.internal.JournalDao import akka.persistence.r2dbc.internal.codec.PayloadCodec.RichStatement import akka.persistence.r2dbc.internal.Sql.InterpolationWithAdapter import akka.persistence.r2dbc.internal.postgres.PostgresJournalDao -import io.r2dbc.spi.ConnectionFactory import io.r2dbc.spi.Statement import org.slf4j.Logger import org.slf4j.LoggerFactory @@ -24,15 +23,16 @@ import scala.concurrent.Future import io.r2dbc.spi.Connection import akka.persistence.r2dbc.internal.R2dbcExecutor +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider /** * INTERNAL API */ @InternalApi -private[r2dbc] class H2JournalDao(journalSettings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit +private[r2dbc] class H2JournalDao(journalSettings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresJournalDao(journalSettings, connectionFactory) { + extends PostgresJournalDao(journalSettings, executorProvider) { import JournalDao.SerializedJournalRow import journalSettings.codecSettings.JournalImplicits._ override protected lazy val log: Logger = LoggerFactory.getLogger(classOf[H2JournalDao]) @@ -40,7 +40,7 @@ private[r2dbc] class H2JournalDao(journalSettings: R2dbcSettings, connectionFact require(journalSettings.useAppTimestamp) require(journalSettings.dbTimestampMonotonicIncreasing) - private val insertSql = sql"INSERT INTO $journalTable " + + private def insertSql(slice: Int) = sql"INSERT INTO ${journalTable(slice)} " + "(slice, entity_type, persistence_id, seq_nr, writer, adapter_manifest, event_ser_id, event_ser_manifest, event_payload, tags, meta_ser_id, meta_ser_manifest, meta_payload, db_timestamp) " + "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)" @@ -59,15 +59,17 @@ private[r2dbc] class H2JournalDao(journalSettings: R2dbcSettings, connectionFact // it's always the same persistenceId for all events val persistenceId = events.head.persistenceId + val slice = persistenceExt.sliceForPersistenceId(persistenceId) + val executor = executorProvider.executorFor(slice) val totalEvents = events.size val result = if (totalEvents == 1) { - r2dbcExecutor.updateOne(s"insert [$persistenceId]")(connection => - bindInsertStatement(connection.createStatement(insertSql), events.head)) + 
executor.updateOne(s"insert [$persistenceId]")(connection => + bindInsertStatement(connection.createStatement(insertSql(slice)), events.head)) } else { - r2dbcExecutor.updateInBatch(s"batch insert [$persistenceId], [$totalEvents] events")(connection => - events.foldLeft(connection.createStatement(insertSql)) { (stmt, write) => + executor.updateInBatch(s"batch insert [$persistenceId], [$totalEvents] events")(connection => + events.foldLeft(connection.createStatement(insertSql(slice))) { (stmt, write) => stmt.add() bindInsertStatement(stmt, write) }) @@ -82,8 +84,9 @@ private[r2dbc] class H2JournalDao(journalSettings: R2dbcSettings, connectionFact override def writeEventInTx(event: SerializedJournalRow, connection: Connection): Future[Instant] = { val persistenceId = event.persistenceId + val slice = persistenceExt.sliceForPersistenceId(persistenceId) - val stmt = bindInsertStatement(connection.createStatement(insertSql), event) + val stmt = bindInsertStatement(connection.createStatement(insertSql(slice)), event) val result = R2dbcExecutor.updateOneInTx(stmt) if (log.isDebugEnabled()) diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2QueryDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2QueryDao.scala index 00a856ce..e64d51e9 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2QueryDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2QueryDao.scala @@ -13,19 +13,20 @@ import io.r2dbc.spi.ConnectionFactory import io.r2dbc.spi.Row import org.slf4j.Logger import org.slf4j.LoggerFactory - import scala.concurrent.ExecutionContext import scala.concurrent.duration.Duration import scala.concurrent.duration.FiniteDuration +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + /** * INTERNAL API */ @InternalApi -private[r2dbc] class H2QueryDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit +private[r2dbc] class H2QueryDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresQueryDao(settings, connectionFactory) { + extends PostgresQueryDao(settings, executorProvider) { import settings.codecSettings.JournalImplicits._ override protected lazy val log: Logger = LoggerFactory.getLogger(classOf[H2QueryDao]) @@ -53,7 +54,7 @@ private[r2dbc] class H2QueryDao(settings: R2dbcSettings, connectionFactory: Conn sql""" $selectColumns - FROM $journalTable + FROM ${journalTable(minSlice)} WHERE entity_type = ? AND ${sliceCondition(minSlice, maxSlice)} AND db_timestamp >= ? 
$toDbTimestampParamCondition $behindCurrentTimeIntervalCondition diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2SnapshotDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2SnapshotDao.scala index 78178101..cf3c6279 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2SnapshotDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/h2/H2SnapshotDao.scala @@ -16,14 +16,16 @@ import scala.concurrent.ExecutionContext import io.r2dbc.spi.Row +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + /** * INTERNAL API */ @InternalApi -private[r2dbc] final class H2SnapshotDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit +private[r2dbc] final class H2SnapshotDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresSnapshotDao(settings, connectionFactory) { + extends PostgresSnapshotDao(settings, executorProvider) { import settings.codecSettings.SnapshotImplicits._ override protected lazy val log: Logger = LoggerFactory.getLogger(classOf[H2SnapshotDao]) diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresDialect.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresDialect.scala index d8dad720..90529c9b 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresDialect.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresDialect.scala @@ -25,6 +25,8 @@ import io.r2dbc.spi.ConnectionFactories import io.r2dbc.spi.ConnectionFactory import io.r2dbc.spi.ConnectionFactoryOptions +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + /** * INTERNAL API */ @@ -115,19 +117,19 @@ private[r2dbc] object PostgresDialect extends Dialect { ConnectionFactories.get(builder.build()) } - override def createJournalDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createJournalDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): JournalDao = - new PostgresJournalDao(settings, connectionFactory)(system.executionContext, system) + new PostgresJournalDao(settings, executorProvider)(system.executionContext, system) - override def createSnapshotDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createSnapshotDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): SnapshotDao = - new PostgresSnapshotDao(settings, connectionFactory)(system.executionContext, system) + new PostgresSnapshotDao(settings, executorProvider)(system.executionContext, system) - override def createQueryDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createQueryDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): QueryDao = - new PostgresQueryDao(settings, connectionFactory)(system.executionContext, system) + new PostgresQueryDao(settings, executorProvider)(system.executionContext, system) - override def createDurableStateDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createDurableStateDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): DurableStateDao = - new PostgresDurableStateDao(settings, connectionFactory, this)(system.executionContext, system) + new PostgresDurableStateDao(settings, 
executorProvider, this)(system.executionContext, system) } diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresDurableStateDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresDurableStateDao.scala index 21a7f326..7042a4a7 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresDurableStateDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresDurableStateDao.scala @@ -47,6 +47,7 @@ import akka.persistence.r2dbc.internal.JournalDao.SerializedJournalRow import akka.persistence.r2dbc.internal.codec.PayloadCodec.RichRow import akka.persistence.r2dbc.internal.codec.PayloadCodec.RichStatement import akka.persistence.r2dbc.internal.R2dbcExecutor +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider import akka.persistence.r2dbc.internal.Sql.InterpolationWithAdapter import akka.persistence.r2dbc.internal.codec.TagsCodec.TagsCodecRichStatement import akka.persistence.r2dbc.internal.codec.TimestampCodec.TimestampCodecRichRow @@ -80,7 +81,7 @@ private[r2dbc] object PostgresDurableStateDao { @InternalApi private[r2dbc] class PostgresDurableStateDao( settings: R2dbcSettings, - connectionFactory: ConnectionFactory, + executorProvider: R2dbcExecutorProvider, dialect: Dialect)(implicit ec: ExecutionContext, system: ActorSystem[_]) extends DurableStateDao { import DurableStateDao._ @@ -89,14 +90,10 @@ private[r2dbc] class PostgresDurableStateDao( protected def log: Logger = PostgresDurableStateDao.log private val persistenceExt = Persistence(system) - protected val r2dbcExecutor = new R2dbcExecutor( - connectionFactory, - log, - settings.logDbCallsExceeding, - settings.connectionFactorySettings.poolSettings.closeCallsExceeding)(ec, system) + protected val r2dbcExecutor = executorProvider.executorFor(slice = 0) // FIXME support data partitions // used for change events - private lazy val journalDao: JournalDao = dialect.createJournalDao(settings, connectionFactory) + private lazy val journalDao: JournalDao = dialect.createJournalDao(settings, executorProvider) private lazy val additionalColumns: Map[String, immutable.IndexedSeq[AdditionalColumn[Any, Any]]] = { settings.durableStateAdditionalColumnClasses.map { case (entityType, columnClasses) => @@ -649,7 +646,7 @@ private[r2dbc] class PostgresDurableStateDao( result.map(_ => Done)(ExecutionContexts.parasitic) } - override def currentDbTimestamp(): Future[Instant] = { + override def currentDbTimestamp(slice: Int): Future[Instant] = { r2dbcExecutor .selectOne("select current db timestamp")( connection => connection.createStatement(currentDbTimestampSql), diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresJournalDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresJournalDao.scala index a5349948..71d2c58a 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresJournalDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresJournalDao.scala @@ -20,7 +20,6 @@ import akka.persistence.r2dbc.internal.codec.TimestampCodec.TimestampCodecRichRo import akka.persistence.r2dbc.internal.codec.TimestampCodec.TimestampCodecRichStatement import akka.persistence.typed.PersistenceId import io.r2dbc.spi.Connection -import io.r2dbc.spi.ConnectionFactory import io.r2dbc.spi.Row import io.r2dbc.spi.Statement import org.slf4j.Logger @@ -29,8 +28,9 @@ import java.time.Instant import scala.concurrent.ExecutionContext import scala.concurrent.Future 
-import akka.persistence.r2dbc.internal.codec.PayloadCodec +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider +import akka.persistence.r2dbc.internal.codec.PayloadCodec import akka.persistence.r2dbc.internal.codec.QueryAdapter import akka.persistence.r2dbc.internal.codec.SqlServerQueryAdapter @@ -61,7 +61,8 @@ private[r2dbc] object PostgresJournalDao { * Class for doing db interaction outside of an actor to avoid mistakes in future callbacks */ @InternalApi -private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit +private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)( + implicit ec: ExecutionContext, system: ActorSystem[_]) extends JournalDao { @@ -69,71 +70,65 @@ private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connecti import journalSettings.codecSettings.JournalImplicits._ protected def log: Logger = PostgresJournalDao.log - private val persistenceExt = Persistence(system) - - protected val r2dbcExecutor = - new R2dbcExecutor( - connectionFactory, - log, - journalSettings.logDbCallsExceeding, - journalSettings.connectionFactorySettings.poolSettings.closeCallsExceeding)(ec, system) - - protected val journalTable: String = journalSettings.journalTableWithSchema - - protected val (insertEventWithParameterTimestampSql, insertEventWithTransactionTimestampSql) = { - val baseSql = - s"INSERT INTO $journalTable " + - "(slice, entity_type, persistence_id, seq_nr, writer, adapter_manifest, event_ser_id, event_ser_manifest, event_payload, tags, meta_ser_id, meta_ser_manifest, meta_payload, db_timestamp) " + - "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, " + protected val persistenceExt: Persistence = Persistence(system) - // The subselect of the db_timestamp of previous seqNr for same pid is to ensure that db_timestamp is - // always increasing for a pid (time not going backwards). - // TODO we could skip the subselect when inserting seqNr 1 as a possible optimization - def timestampSubSelect = - s"(SELECT db_timestamp + '1 microsecond'::interval FROM $journalTable " + - "WHERE persistence_id = ? AND seq_nr = ?)" + protected def journalTable(slice: Int): String = journalSettings.journalTableWithSchema(slice) - val insertEventWithParameterTimestampSql = { - if (journalSettings.dbTimestampMonotonicIncreasing) - sql"$baseSql ?) RETURNING db_timestamp" - else - sql"$baseSql GREATEST(?, $timestampSubSelect)) RETURNING db_timestamp" - } + protected def insertEventWithParameterTimestampSql(slice: Int): String = { + val table = journalTable(slice) + val baseSql = insertEvenBaseSql(table) + if (journalSettings.dbTimestampMonotonicIncreasing) + sql"$baseSql ?) 
RETURNING db_timestamp" + else + sql"$baseSql GREATEST(?, ${timestampSubSelect(table)})) RETURNING db_timestamp" + } - val insertEventWithTransactionTimestampSql = { - if (journalSettings.dbTimestampMonotonicIncreasing) - sql"$baseSql CURRENT_TIMESTAMP) RETURNING db_timestamp" - else - sql"$baseSql GREATEST(CURRENT_TIMESTAMP, $timestampSubSelect)) RETURNING db_timestamp" - } + private def insertEventWithTransactionTimestampSql(slice: Int) = { + val table = journalTable(slice) + val baseSql = insertEvenBaseSql(table) + if (journalSettings.dbTimestampMonotonicIncreasing) + sql"$baseSql CURRENT_TIMESTAMP) RETURNING db_timestamp" + else + sql"$baseSql GREATEST(CURRENT_TIMESTAMP, ${timestampSubSelect(table)})) RETURNING db_timestamp" + } - (insertEventWithParameterTimestampSql, insertEventWithTransactionTimestampSql) + private def insertEvenBaseSql(table: String) = { + s"INSERT INTO $table " + + "(slice, entity_type, persistence_id, seq_nr, writer, adapter_manifest, event_ser_id, event_ser_manifest, event_payload, tags, meta_ser_id, meta_ser_manifest, meta_payload, db_timestamp) " + + "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, " } - private val selectHighestSequenceNrSql = sql""" - SELECT MAX(seq_nr) from $journalTable + // The subselect of the db_timestamp of previous seqNr for same pid is to ensure that db_timestamp is + // always increasing for a pid (time not going backwards). + // TODO we could skip the subselect when inserting seqNr 1 as a possible optimization + private def timestampSubSelect(table: String) = + s"(SELECT db_timestamp + '1 microsecond'::interval FROM $table " + + "WHERE persistence_id = ? AND seq_nr = ?)" + + private def selectHighestSequenceNrSql(slice: Int) = sql""" + SELECT MAX(seq_nr) from ${journalTable(slice)} WHERE persistence_id = ? AND seq_nr >= ?""" - private val selectLowestSequenceNrSql = + private def selectLowestSequenceNrSql(slice: Int) = sql""" - SELECT MIN(seq_nr) from $journalTable + SELECT MIN(seq_nr) from ${journalTable(slice)} WHERE persistence_id = ?""" - private val deleteEventsSql = sql""" - DELETE FROM $journalTable + private def deleteEventsSql(slice: Int) = sql""" + DELETE FROM ${journalTable(slice)} WHERE persistence_id = ? AND seq_nr >= ? AND seq_nr <= ?""" - protected def insertDeleteMarkerSql(timestamp: String = "CURRENT_TIMESTAMP"): String = sql""" - INSERT INTO $journalTable + protected def insertDeleteMarkerSql(slice: Int, timestamp: String = "CURRENT_TIMESTAMP"): String = sql""" + INSERT INTO ${journalTable(slice)} (slice, entity_type, persistence_id, seq_nr, db_timestamp, writer, adapter_manifest, event_ser_id, event_ser_manifest, event_payload, deleted) VALUES (?, ?, ?, ?, $timestamp, ?, ?, ?, ?, ?, ?)""" - private val deleteEventsByPersistenceIdBeforeTimestampSql = sql""" - DELETE FROM $journalTable + private def deleteEventsByPersistenceIdBeforeTimestampSql(slice: Int) = sql""" + DELETE FROM ${journalTable(slice)} WHERE persistence_id = ? AND db_timestamp < ?""" - private val deleteEventsBySliceBeforeTimestampSql = sql""" - DELETE FROM $journalTable + private def deleteEventsBySliceBeforeTimestampSql(slice: Int) = sql""" + DELETE FROM ${journalTable(slice)} WHERE slice = ? AND entity_type = ? 
AND db_timestamp < ?""" /** @@ -151,18 +146,20 @@ private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connecti // it's always the same persistenceId for all events val persistenceId = events.head.persistenceId + val slice = persistenceExt.sliceForPersistenceId(persistenceId) + val executor = executorProvider.executorFor(slice) val previousSeqNr = events.head.seqNr - 1 // The MigrationTool defines the dbTimestamp to preserve the original event timestamp val useTimestampFromDb = events.head.dbTimestamp == Instant.EPOCH val insertSql = - if (useTimestampFromDb) insertEventWithTransactionTimestampSql - else insertEventWithParameterTimestampSql + if (useTimestampFromDb) insertEventWithTransactionTimestampSql(slice) + else insertEventWithParameterTimestampSql(slice) val totalEvents = events.size if (totalEvents == 1) { - val result = r2dbcExecutor.updateOneReturning(s"insert [$persistenceId]")( + val result = executor.updateOneReturning(s"insert [$persistenceId]")( connection => bindInsertStatement(connection.createStatement(insertSql), events.head, useTimestampFromDb, previousSeqNr), row => row.getTimestamp("db_timestamp")) @@ -172,7 +169,7 @@ private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connecti } result } else { - val result = r2dbcExecutor.updateInBatchReturning(s"batch insert [$persistenceId], [$totalEvents] events")( + val result = executor.updateInBatchReturning(s"batch insert [$persistenceId], [$totalEvents] events")( connection => events.foldLeft(connection.createStatement(insertSql)) { (stmt, write) => stmt.add() @@ -189,14 +186,15 @@ private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connecti override def writeEventInTx(event: SerializedJournalRow, connection: Connection): Future[Instant] = { val persistenceId = event.persistenceId + val slice = persistenceExt.sliceForPersistenceId(persistenceId) val previousSeqNr = event.seqNr - 1 // The MigrationTool defines the dbTimestamp to preserve the original event timestamp val useTimestampFromDb = event.dbTimestamp == Instant.EPOCH val insertSql = - if (useTimestampFromDb) insertEventWithTransactionTimestampSql - else insertEventWithParameterTimestampSql + if (useTimestampFromDb) insertEventWithTransactionTimestampSql(slice) + else insertEventWithParameterTimestampSql(slice) val stmt = bindInsertStatement(connection.createStatement(insertSql), event, useTimestampFromDb, previousSeqNr) val result = R2dbcExecutor.updateOneReturningInTx(stmt, row => row.getTimestamp("db_timestamp")) @@ -262,11 +260,13 @@ private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connecti } override def readHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = { - val result = r2dbcExecutor + val slice = persistenceExt.sliceForPersistenceId(persistenceId) + val executor = executorProvider.executorFor(slice) + val result = executor .select(s"select highest seqNr [$persistenceId]")( connection => connection - .createStatement(selectHighestSequenceNrSql) + .createStatement(selectHighestSequenceNrSql(slice)) .bind(0, persistenceId) .bind(1, fromSequenceNr), row => { @@ -282,11 +282,13 @@ private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connecti } override def readLowestSequenceNr(persistenceId: String): Future[Long] = { - val result = r2dbcExecutor + val slice = persistenceExt.sliceForPersistenceId(persistenceId) + val executor = executorProvider.executorFor(slice) + val result = executor .select(s"select lowest seqNr [$persistenceId]")( 
connection => connection - .createStatement(selectLowestSequenceNrSql) + .createStatement(selectLowestSequenceNrSql(slice)) .bind(0, persistenceId), row => { val seqNr = row.get(0, classOf[java.lang.Long]) @@ -316,14 +318,13 @@ private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connecti } protected def bindTimestampNow(stmt: Statement, getAndIncIndex: () => Int): Statement = stmt override def deleteEventsTo(persistenceId: String, toSequenceNr: Long, resetSequenceNumber: Boolean): Future[Unit] = { + val slice = persistenceExt.sliceForPersistenceId(persistenceId) + val executor = executorProvider.executorFor(slice) def insertDeleteMarkerStmt(deleteMarkerSeqNr: Long, connection: Connection): Statement = { - val idx = Iterator.range(0, Int.MaxValue) - val entityType = PersistenceId.extractEntityType(persistenceId) - val slice = persistenceExt.sliceForPersistenceId(persistenceId) - val stmt = connection.createStatement(insertDeleteMarkerSql()) + val stmt = connection.createStatement(insertDeleteMarkerSql(slice)) stmt .bind(idx.next(), slice) .bind(idx.next(), entityType) @@ -341,17 +342,17 @@ private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connecti def deleteBatch(from: Long, to: Long, lastBatch: Boolean): Future[Unit] = { (if (lastBatch && !resetSequenceNumber) { - r2dbcExecutor + executor .update(s"delete [$persistenceId] and insert marker") { connection => Vector( - connection.createStatement(deleteEventsSql).bind(0, persistenceId).bind(1, from).bind(2, to), + connection.createStatement(deleteEventsSql(slice)).bind(0, persistenceId).bind(1, from).bind(2, to), insertDeleteMarkerStmt(to, connection)) } .map(_.head) } else { - r2dbcExecutor + executor .updateOne(s"delete [$persistenceId]") { connection => - connection.createStatement(deleteEventsSql).bind(0, persistenceId).bind(1, from).bind(2, to) + connection.createStatement(deleteEventsSql(slice)).bind(0, persistenceId).bind(1, from).bind(2, to) } }).map(deletedRows => if (log.isDebugEnabled) { @@ -383,10 +384,12 @@ private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connecti } override def deleteEventsBefore(persistenceId: String, timestamp: Instant): Future[Unit] = { - r2dbcExecutor + val slice = persistenceExt.sliceForPersistenceId(persistenceId) + val executor = executorProvider.executorFor(slice) + executor .updateOne(s"delete [$persistenceId]") { connection => connection - .createStatement(deleteEventsByPersistenceIdBeforeTimestampSql) + .createStatement(deleteEventsByPersistenceIdBeforeTimestampSql(slice)) .bind(0, persistenceId) .bindTimestamp(1, timestamp) } @@ -396,10 +399,11 @@ private[r2dbc] class PostgresJournalDao(journalSettings: R2dbcSettings, connecti } override def deleteEventsBefore(entityType: String, slice: Int, timestamp: Instant): Future[Unit] = { - r2dbcExecutor + val executor = executorProvider.executorFor(slice) + executor .updateOne(s"delete [$entityType]") { connection => connection - .createStatement(deleteEventsBySliceBeforeTimestampSql) + .createStatement(deleteEventsBySliceBeforeTimestampSql(slice)) .bind(0, slice) .bind(1, entityType) .bindTimestamp(2, timestamp) diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresQueryDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresQueryDao.scala index 1f51e81c..d7b3aaa1 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresQueryDao.scala +++ 
b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresQueryDao.scala @@ -4,6 +4,7 @@ package akka.persistence.r2dbc.internal.postgres +import scala.collection.immutable import java.time.Instant import scala.concurrent.ExecutionContext @@ -22,18 +23,21 @@ import akka.persistence.r2dbc.internal.InstantFactory import akka.persistence.r2dbc.internal.JournalDao.SerializedJournalRow import akka.persistence.r2dbc.internal.codec.PayloadCodec.RichRow import akka.persistence.r2dbc.internal.QueryDao -import akka.persistence.r2dbc.internal.R2dbcExecutor import akka.persistence.r2dbc.internal.Sql.InterpolationWithAdapter import akka.persistence.r2dbc.internal.codec.TagsCodec.TagsCodecRichRow import akka.persistence.r2dbc.internal.codec.TimestampCodec.TimestampCodecRichRow import akka.persistence.r2dbc.internal.codec.TimestampCodec.TimestampCodecRichStatement +import akka.persistence.Persistence import akka.persistence.typed.PersistenceId import akka.stream.scaladsl.Source -import io.r2dbc.spi.ConnectionFactory import io.r2dbc.spi.Statement import org.slf4j.Logger import org.slf4j.LoggerFactory +import akka.persistence.Persistence +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider +import akka.persistence.r2dbc.internal.codec.PayloadCodec + /** * INTERNAL API */ @@ -46,7 +50,7 @@ private[r2dbc] object PostgresQueryDao { * INTERNAL API */ @InternalApi -private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit +private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit ec: ExecutionContext, system: ActorSystem[_]) extends QueryDao { @@ -54,7 +58,8 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory import settings.codecSettings.JournalImplicits._ protected def log: Logger = PostgresQueryDao.log - protected val journalTable: String = settings.journalTableWithSchema + protected val persistenceExt: Persistence = Persistence(system) + protected def journalTable(slice: Int): String = settings.journalTableWithSchema(slice) protected def sqlFalse: String = "false" protected def sqlDbTimestamp = "CURRENT_TIMESTAMP" @@ -85,7 +90,7 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory sql""" $selectColumns - FROM $journalTable + FROM ${journalTable(minSlice)} WHERE entity_type = ? AND ${sliceCondition(minSlice, maxSlice)} AND db_timestamp >= ? $toDbTimestampParamCondition $behindCurrentTimeIntervalCondition @@ -100,7 +105,7 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory protected def selectBucketsSql(minSlice: Int, maxSlice: Int): String = { sql""" SELECT extract(EPOCH from db_timestamp)::BIGINT / 10 AS bucket, count(*) AS count - FROM $journalTable + FROM ${journalTable(minSlice)} WHERE entity_type = ? AND ${sliceCondition(minSlice, maxSlice)} AND db_timestamp >= ? AND db_timestamp <= ? @@ -109,23 +114,23 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory """ } - private val selectTimestampOfEventSql = sql""" - SELECT db_timestamp FROM $journalTable + protected def selectTimestampOfEventSql(slice: Int) = sql""" + SELECT db_timestamp FROM ${journalTable(slice)} WHERE persistence_id = ? AND seq_nr = ? 
AND deleted = $sqlFalse""" - protected val selectOneEventSql = sql""" + protected def selectOneEventSql(slice: Int) = sql""" SELECT slice, entity_type, db_timestamp, $sqlDbTimestamp AS read_db_timestamp, event_ser_id, event_ser_manifest, event_payload, meta_ser_id, meta_ser_manifest, meta_payload, tags - FROM $journalTable + FROM ${journalTable(slice)} WHERE persistence_id = ? AND seq_nr = ? AND deleted = $sqlFalse""" - private val selectOneEventWithoutPayloadSql = sql""" + protected def selectOneEventWithoutPayloadSql(slice: Int) = sql""" SELECT slice, entity_type, db_timestamp, CURRENT_TIMESTAMP AS read_db_timestamp, event_ser_id, event_ser_manifest, meta_ser_id, meta_ser_manifest, meta_payload, tags - FROM $journalTable + FROM ${journalTable(slice)} WHERE persistence_id = ? AND seq_nr = ? AND deleted = $sqlFalse""" - protected val selectEventsSql = sql""" + protected def selectEventsSql(slice: Int) = sql""" SELECT slice, entity_type, persistence_id, seq_nr, db_timestamp, CURRENT_TIMESTAMP AS read_db_timestamp, event_ser_id, event_ser_manifest, event_payload, writer, adapter_manifest, meta_ser_id, meta_ser_manifest, meta_payload, tags - from $journalTable + from ${journalTable(slice)} WHERE persistence_id = ? AND seq_nr >= ? AND seq_nr <= ? AND deleted = false ORDER BY seq_nr @@ -142,26 +147,22 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory .bind(2, toSequenceNr) .bind(3, settings.querySettings.bufferSize) - protected val allPersistenceIdsSql = - sql"SELECT DISTINCT(persistence_id) from $journalTable ORDER BY persistence_id LIMIT ?" - - protected val persistenceIdsForEntityTypeSql = - sql"SELECT DISTINCT(persistence_id) from $journalTable WHERE persistence_id LIKE ? ORDER BY persistence_id LIMIT ?" + protected def allPersistenceIdsSql(minSlice: Int) = { + sql"SELECT DISTINCT(persistence_id) from ${journalTable(minSlice)} ORDER BY persistence_id LIMIT ?" + } - protected val allPersistenceIdsAfterSql = - sql"SELECT DISTINCT(persistence_id) from $journalTable WHERE persistence_id > ? ORDER BY persistence_id LIMIT ?" + protected def persistenceIdsForEntityTypeSql(minSlice: Int) = + sql"SELECT DISTINCT(persistence_id) from ${journalTable(minSlice)} WHERE persistence_id LIKE ? ORDER BY persistence_id LIMIT ?" - protected val persistenceIdsForEntityTypeAfterSql = - sql"SELECT DISTINCT(persistence_id) from $journalTable WHERE persistence_id LIKE ? AND persistence_id > ? ORDER BY persistence_id LIMIT ?" + protected def allPersistenceIdsAfterSql(minSlice: Int) = + sql"SELECT DISTINCT(persistence_id) from ${journalTable(minSlice)} WHERE persistence_id > ? ORDER BY persistence_id LIMIT ?" - protected val r2dbcExecutor = new R2dbcExecutor( - connectionFactory, - log, - settings.logDbCallsExceeding, - settings.connectionFactorySettings.poolSettings.closeCallsExceeding)(ec, system) + protected def persistenceIdsForEntityTypeAfterSql(minSlice: Int) = + sql"SELECT DISTINCT(persistence_id) from ${journalTable(minSlice)} WHERE persistence_id LIKE ? AND persistence_id > ? ORDER BY persistence_id LIMIT ?" 
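The per-slice variants above all hinge on the same mapping: a slice determines its data partition, and the data partition determines the table suffix and which connection factory is used. The following is a minimal sketch of that mapping, assuming 1024 slices split into contiguous ranges as described in the reference.conf comment; the helper names are illustrative, not the plugin's internal API:

    object DataPartitionSketch {
      val NumberOfSlices = 1024

      // e.g. with 4 data partitions: slice 17 -> partition 0, slice 256 -> partition 1
      def dataPartition(slice: Int, numberOfDataPartitions: Int): Int =
        slice / (NumberOfSlices / numberOfDataPartitions)

      // table name with data partition suffix, e.g. "s1.event_journal_2" for slice 600 and 4 partitions;
      // with a single data partition there is no suffix
      def journalTableWithSchema(schema: String, journalTable: String, slice: Int, numberOfDataPartitions: Int): String =
        if (numberOfDataPartitions == 1) s"$schema.$journalTable"
        else s"$schema.${journalTable}_${dataPartition(slice, numberOfDataPartitions)}"

      // a query such as eventsBySlices must not cross a partition boundary
      def isSliceRangeWithinSameDataPartition(minSlice: Int, maxSlice: Int, numberOfDataPartitions: Int): Boolean =
        dataPartition(minSlice, numberOfDataPartitions) == dataPartition(maxSlice, numberOfDataPartitions)
    }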
- def currentDbTimestamp(): Future[Instant] = { - r2dbcExecutor + override def currentDbTimestamp(slice: Int): Future[Instant] = { + val executor = executorProvider.executorFor(slice) + executor .selectOne("select current db timestamp")( connection => connection.createStatement(currentDbTimestampSql), row => row.getTimestamp("db_timestamp")) @@ -196,7 +197,13 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory toTimestamp: Option[Instant], behindCurrentTime: FiniteDuration, backtracking: Boolean): Source[SerializedJournalRow, NotUsed] = { - val result = r2dbcExecutor.select(s"select eventsBySlices [$minSlice - $maxSlice]")( + + if (!settings.isSliceRangeWithinSameDataPartition(minSlice, maxSlice)) + throw new IllegalArgumentException( + s"Slice range [$minSlice-$maxSlice] spans over more than one " + + s"of the [${settings.numberOfDataPartitions}] data partitions.") + val executor = executorProvider.executorFor(minSlice) + val result = executor.select(s"select eventsBySlices [$minSlice - $maxSlice]")( connection => { val stmt = connection .createStatement( @@ -263,6 +270,7 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory maxSlice: Int, fromTimestamp: Instant, limit: Int): Future[Seq[Bucket]] = { + val executor = executorProvider.executorFor(minSlice) val toTimestamp = { val now = InstantFactory.now() // not important to use database time @@ -275,7 +283,7 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory } } - val result = r2dbcExecutor.select(s"select bucket counts [$minSlice - $maxSlice]")( + val result = executor.select(s"select bucket counts [$minSlice - $maxSlice]")( connection => { val stmt = connection.createStatement(selectBucketsSql(minSlice, maxSlice)) bindSelectBucketsSql(stmt, entityType, fromTimestamp, toTimestamp, limit) @@ -298,10 +306,12 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory override def countBucketsMayChange: Boolean = false override def timestampOfEvent(persistenceId: String, seqNr: Long): Future[Option[Instant]] = { - r2dbcExecutor.selectOne("select timestampOfEvent")( + val slice = persistenceExt.sliceForPersistenceId(persistenceId) + val executor = executorProvider.executorFor(slice) + executor.selectOne("select timestampOfEvent")( connection => connection - .createStatement(selectTimestampOfEventSql) + .createStatement(selectTimestampOfEventSql(slice)) .bind(0, persistenceId) .bind(1, seqNr), row => row.getTimestamp("db_timestamp")) @@ -310,10 +320,12 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory override def loadEvent( persistenceId: String, seqNr: Long, - includePayload: Boolean): Future[Option[SerializedJournalRow]] = - r2dbcExecutor.selectOne("select one event")( + includePayload: Boolean): Future[Option[SerializedJournalRow]] = { + val slice = persistenceExt.sliceForPersistenceId(persistenceId) + val executor = executorProvider.executorFor(slice) + executor.selectOne("select one event")( connection => { - val selectSql = if (includePayload) selectOneEventSql else selectOneEventWithoutPayloadSql + val selectSql = if (includePayload) selectOneEventSql(slice) else selectOneEventWithoutPayloadSql(slice) connection .createStatement(selectSql) .bind(0, persistenceId) @@ -338,15 +350,17 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory tags = row.getTags("tags"), metadata = readMetadata(row)) }) + } override def eventsByPersistenceId( persistenceId: 
String, fromSequenceNr: Long, toSequenceNr: Long): Source[SerializedJournalRow, NotUsed] = { - - val result = r2dbcExecutor.select(s"select eventsByPersistenceId [$persistenceId]")( + val slice = persistenceExt.sliceForPersistenceId(persistenceId) + val executor = executorProvider.executorFor(slice) + val result = executor.select(s"select eventsByPersistenceId [$persistenceId]")( connection => { - val stmt = connection.createStatement(selectEventsSql) + val stmt = connection.createStatement(selectEventsSql(slice)) bindSelectEventsSql(stmt, persistenceId, fromSequenceNr, toSequenceNr, settings.querySettings.bufferSize) }, row => @@ -393,25 +407,38 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory } override def persistenceIds(entityType: String, afterId: Option[String], limit: Long): Source[String, NotUsed] = { + val actualLimit = if (limit > Int.MaxValue) Int.MaxValue else limit.toInt val likeStmtPostfix = PersistenceId.DefaultSeparator + "%" - val result = r2dbcExecutor.select(s"select persistenceIds by entity type")( - connection => - afterId match { - case Some(after) => - val stmt = connection.createStatement(persistenceIdsForEntityTypeAfterSql) - bindPersistenceIdsForEntityTypeAfterSql(stmt, entityType, likeStmtPostfix, after, limit) - - case None => - val stmt = connection.createStatement(persistenceIdsForEntityTypeSql) - bindPersistenceIdsForEntityTypeSql(stmt, entityType, likeStmtPostfix, limit) + val results: immutable.IndexedSeq[Future[immutable.IndexedSeq[String]]] = + // query each data partition + settings.dataPartitionSliceRanges.map { sliceRange => + val executor = executorProvider.executorFor(sliceRange.min) + executor.select(s"select persistenceIds by entity type")( + connection => + afterId match { + case Some(after) => + val stmt = connection.createStatement(persistenceIdsForEntityTypeAfterSql(sliceRange.min)) + bindPersistenceIdsForEntityTypeAfterSql(stmt, entityType, likeStmtPostfix, after, actualLimit) + + case None => + val stmt = connection.createStatement(persistenceIdsForEntityTypeSql(sliceRange.min)) + bindPersistenceIdsForEntityTypeSql(stmt, entityType, likeStmtPostfix, actualLimit) + + }, + row => row.get("persistence_id", classOf[String])) + } - }, - row => row.get("persistence_id", classOf[String])) + // Theoretically it could blow up with too many rows (> Int.MaxValue) when fetching from more than + // one data partition, but we have other places with a hard limit of a total number of persistenceIds less + // than Int.MaxValue. 
+ val combined: Future[immutable.IndexedSeq[String]] = + if (results.size == 1) results.head // no data partition databases + else Future.sequence(results).map(_.flatten.sorted.take(actualLimit)) if (log.isDebugEnabled) - result.foreach(rows => log.debug("Read [{}] persistence ids by entity type [{}]", rows.size, entityType)) + combined.foreach(rows => log.debug("Read [{}] persistence ids by entity type [{}]", rows.size, entityType)) - Source.futureSource(result.map(Source(_))).mapMaterializedValue(_ => NotUsed) + Source.futureSource(combined.map(Source(_))).mapMaterializedValue(_ => NotUsed) } protected def bindAllPersistenceIdsAfterSql(stmt: Statement, after: String, limit: Long): Statement = { @@ -421,24 +448,37 @@ private[r2dbc] class PostgresQueryDao(settings: R2dbcSettings, connectionFactory } override def persistenceIds(afterId: Option[String], limit: Long): Source[String, NotUsed] = { - val result = r2dbcExecutor.select(s"select persistenceIds")( - connection => - afterId match { - case Some(after) => - val stmt = connection.createStatement(allPersistenceIdsAfterSql) - bindAllPersistenceIdsAfterSql(stmt, after, limit) - - case None => - connection - .createStatement(allPersistenceIdsSql) - .bind(0, limit) - }, - row => row.get("persistence_id", classOf[String])) + val actualLimit = if (limit > Int.MaxValue) Int.MaxValue else limit.toInt + val results: immutable.IndexedSeq[Future[immutable.IndexedSeq[String]]] = + // query each data partition + settings.dataPartitionSliceRanges.map { sliceRange => + val executor = executorProvider.executorFor(sliceRange.min) + executor.select(s"select persistenceIds")( + connection => + afterId match { + case Some(after) => + val stmt = connection.createStatement(allPersistenceIdsAfterSql(sliceRange.min)) + bindAllPersistenceIdsAfterSql(stmt, after, actualLimit) + + case None => + connection + .createStatement(allPersistenceIdsSql(sliceRange.min)) + .bind(0, actualLimit) + }, + row => row.get("persistence_id", classOf[String])) + } + + // Theoretically it could blow up with too many rows (> Int.MaxValue) when fetching from more than + // one data partition, but we have other places with a hard limit of a total number of persistenceIds less + // than Int.MaxValue. 
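Callers of the query plugin do not have to be aware of this fan-out: each data partition is queried with its own executor and the partial results are merged, sorted and capped at the requested limit before being emitted. A small usage sketch, assuming the plugin is configured under its default config path:

    import akka.NotUsed
    import akka.actor.typed.ActorSystem
    import akka.persistence.query.PersistenceQuery
    import akka.persistence.r2dbc.query.scaladsl.R2dbcReadJournal
    import akka.stream.scaladsl.Source

    object PersistenceIdsUsage {
      // One stream of persistence ids, transparently combined from all data partitions.
      def currentIds(implicit system: ActorSystem[_]): Source[String, NotUsed] =
        PersistenceQuery(system)
          .readJournalFor[R2dbcReadJournal](R2dbcReadJournal.Identifier)
          .currentPersistenceIds()
    }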
+ val combined: Future[immutable.IndexedSeq[String]] = + if (results.size == 1) results.head // no data partitions + else Future.sequence(results).map(_.flatten.sorted.take(actualLimit)) if (log.isDebugEnabled) - result.foreach(rows => log.debug("Read [{}] persistence ids", rows.size)) + combined.foreach(rows => log.debug("Read [{}] persistence ids", rows.size)) - Source.futureSource(result.map(Source(_))).mapMaterializedValue(_ => NotUsed) + Source.futureSource(combined.map(Source(_))).mapMaterializedValue(_ => NotUsed) } } diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresSnapshotDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresSnapshotDao.scala index f5ea95f3..551d5570 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresSnapshotDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/PostgresSnapshotDao.scala @@ -29,6 +29,7 @@ import akka.persistence.r2dbc.internal.InstantFactory import akka.persistence.r2dbc.internal.codec.PayloadCodec.RichRow import akka.persistence.r2dbc.internal.codec.PayloadCodec.RichStatement import akka.persistence.r2dbc.internal.R2dbcExecutor +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider import akka.persistence.r2dbc.internal.SnapshotDao import akka.persistence.r2dbc.internal.Sql.InterpolationWithAdapter import akka.persistence.r2dbc.internal.codec.TagsCodec.TagsCodecRichStatement @@ -49,7 +50,7 @@ private[r2dbc] object PostgresSnapshotDao { * INTERNAL API */ @InternalApi -private[r2dbc] class PostgresSnapshotDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit +private[r2dbc] class PostgresSnapshotDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit ec: ExecutionContext, system: ActorSystem[_]) extends SnapshotDao { @@ -60,11 +61,7 @@ private[r2dbc] class PostgresSnapshotDao(settings: R2dbcSettings, connectionFact protected val snapshotTable: String = settings.snapshotsTableWithSchema - protected val r2dbcExecutor = new R2dbcExecutor( - connectionFactory, - log, - settings.logDbCallsExceeding, - settings.connectionFactorySettings.poolSettings.closeCallsExceeding)(ec, system) + protected val r2dbcExecutor = executorProvider.executorFor(slice = 0) // FIXME support data partitions protected def createUpsertSql: String = { // db_timestamp and tags columns were added in 1.2.0 @@ -342,7 +339,7 @@ private[r2dbc] class PostgresSnapshotDao(settings: R2dbcSettings, connectionFact /** * This is used from `BySliceQuery`, i.e. 
only if settings.querySettings.startFromSnapshotEnabled */ - override def currentDbTimestamp(): Future[Instant] = { + override def currentDbTimestamp(slice: Int): Future[Instant] = { r2dbcExecutor .selectOne("select current db timestamp")( connection => connection.createStatement(currentDbTimestampSql), diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteDialect.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteDialect.scala index af620230..5f76925b 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteDialect.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteDialect.scala @@ -15,6 +15,8 @@ import akka.persistence.r2dbc.internal.SnapshotDao import com.typesafe.config.Config import io.r2dbc.spi.ConnectionFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + /** * INTERNAL API */ @@ -26,19 +28,19 @@ private[r2dbc] object YugabyteDialect extends Dialect { override def createConnectionFactory(config: Config): ConnectionFactory = PostgresDialect.createConnectionFactory(config) - override def createJournalDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createJournalDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): JournalDao = - new PostgresJournalDao(settings, connectionFactory)(system.executionContext, system) + new PostgresJournalDao(settings, executorProvider)(system.executionContext, system) - override def createSnapshotDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createSnapshotDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): SnapshotDao = - new YugabyteSnapshotDao(settings, connectionFactory)(system.executionContext, system) + new YugabyteSnapshotDao(settings, executorProvider)(system.executionContext, system) - override def createQueryDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createQueryDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): QueryDao = - new YugabyteQueryDao(settings, connectionFactory)(system.executionContext, system) + new YugabyteQueryDao(settings, executorProvider)(system.executionContext, system) - override def createDurableStateDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createDurableStateDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): DurableStateDao = - new YugabyteDurableStateDao(settings, connectionFactory, this)(system.executionContext, system) + new YugabyteDurableStateDao(settings, executorProvider, this)(system.executionContext, system) } diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteDurableStateDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteDurableStateDao.scala index 9a9ba030..ceb1630c 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteDurableStateDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteDurableStateDao.scala @@ -6,7 +6,6 @@ package akka.persistence.r2dbc.internal.postgres import scala.concurrent.ExecutionContext -import io.r2dbc.spi._ import org.slf4j.Logger import org.slf4j.LoggerFactory @@ -14,6 +13,7 @@ import akka.actor.typed.ActorSystem import akka.annotation.InternalApi import 
akka.persistence.r2dbc.R2dbcSettings import akka.persistence.r2dbc.internal.Dialect +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider /** * INTERNAL API @@ -21,9 +21,9 @@ import akka.persistence.r2dbc.internal.Dialect @InternalApi private[r2dbc] final class YugabyteDurableStateDao( settings: R2dbcSettings, - connectionFactory: ConnectionFactory, + executorProvider: R2dbcExecutorProvider, dialect: Dialect)(implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresDurableStateDao(settings, connectionFactory, dialect) { + extends PostgresDurableStateDao(settings, executorProvider, dialect) { override protected lazy val log: Logger = LoggerFactory.getLogger(classOf[YugabyteDurableStateDao]) diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteQueryDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteQueryDao.scala index 875e9a97..684133ab 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteQueryDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteQueryDao.scala @@ -13,14 +13,16 @@ import io.r2dbc.spi.ConnectionFactory import org.slf4j.Logger import org.slf4j.LoggerFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + /** * INTERNAL API */ @InternalApi -private[r2dbc] final class YugabyteQueryDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit +private[r2dbc] final class YugabyteQueryDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresQueryDao(settings, connectionFactory) { + extends PostgresQueryDao(settings, executorProvider) { override protected lazy val log: Logger = LoggerFactory.getLogger(classOf[YugabyteQueryDao]) diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteSnapshotDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteSnapshotDao.scala index 28936f70..348dc9e9 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteSnapshotDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/postgres/YugabyteSnapshotDao.scala @@ -13,14 +13,17 @@ import io.r2dbc.spi.ConnectionFactory import org.slf4j.Logger import org.slf4j.LoggerFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + /** * INTERNAL API */ @InternalApi -private[r2dbc] final class YugabyteSnapshotDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit +private[r2dbc] final class YugabyteSnapshotDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)( + implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresSnapshotDao(settings, connectionFactory) { + extends PostgresSnapshotDao(settings, executorProvider) { override protected lazy val log: Logger = LoggerFactory.getLogger(classOf[YugabyteSnapshotDao]) diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerDialect.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerDialect.scala index 43bfe2e7..8bb9f0ad 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerDialect.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerDialect.scala @@ -23,6 +23,8 @@ import io.r2dbc.spi.ConnectionFactories import io.r2dbc.spi.ConnectionFactory import io.r2dbc.spi.ConnectionFactoryOptions +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + 
/** * INTERNAL API */ @@ -83,19 +85,19 @@ private[r2dbc] object SqlServerDialect extends Dialect { .build()) } - override def createJournalDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createJournalDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): JournalDao = - new SqlServerJournalDao(settings, connectionFactory)(system.executionContext, system) + new SqlServerJournalDao(settings, executorProvider)(system.executionContext, system) - override def createQueryDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createQueryDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): QueryDao = - new SqlServerQueryDao(settings, connectionFactory)(system.executionContext, system) + new SqlServerQueryDao(settings, executorProvider)(system.executionContext, system) - override def createSnapshotDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createSnapshotDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): SnapshotDao = - new SqlServerSnapshotDao(settings, connectionFactory)(system.executionContext, system) + new SqlServerSnapshotDao(settings, executorProvider)(system.executionContext, system) - override def createDurableStateDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit + override def createDurableStateDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit system: ActorSystem[_]): DurableStateDao = - new SqlServerDurableStateDao(settings, connectionFactory, this)(system.executionContext, system) + new SqlServerDurableStateDao(settings, executorProvider, this)(system.executionContext, system) } diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerDurableStateDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerDurableStateDao.scala index 3414d2b4..43da199f 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerDurableStateDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerDurableStateDao.scala @@ -20,12 +20,12 @@ import akka.persistence.r2dbc.internal.Sql.InterpolationWithAdapter import akka.persistence.r2dbc.internal.codec.TimestampCodec.TimestampCodecRichStatement import akka.persistence.r2dbc.internal.postgres.PostgresDurableStateDao import akka.persistence.r2dbc.internal.postgres.PostgresDurableStateDao.EvaluatedAdditionalColumnBindings -import io.r2dbc.spi.ConnectionFactory import io.r2dbc.spi.Statement import org.slf4j.Logger import org.slf4j.LoggerFactory import akka.persistence.r2dbc.internal.InstantFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider /** * INTERNAL API @@ -41,9 +41,9 @@ private[r2dbc] object SqlServerDurableStateDao { @InternalApi private[r2dbc] class SqlServerDurableStateDao( settings: R2dbcSettings, - connectionFactory: ConnectionFactory, + executorProvider: R2dbcExecutorProvider, dialect: Dialect)(implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresDurableStateDao(settings, connectionFactory, dialect) { + extends PostgresDurableStateDao(settings, executorProvider, dialect) { import settings.codecSettings.DurableStateImplicits._ require(settings.useAppTimestamp, "SqlServer requires akka.persistence.r2dbc.use-app-timestamp=on") @@ -195,6 +195,6 @@ private[r2dbc] class 
SqlServerDurableStateDao( .bind("@persistenceId", after) .bind("@limit", limit) - override def currentDbTimestamp(): Future[Instant] = Future.successful(InstantFactory.now()) + override def currentDbTimestamp(slice: Int): Future[Instant] = Future.successful(InstantFactory.now()) } diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerJournalDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerJournalDao.scala index 47a3cccd..3ea0ee12 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerJournalDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerJournalDao.scala @@ -18,6 +18,7 @@ import org.slf4j.Logger import org.slf4j.LoggerFactory import akka.persistence.r2dbc.internal.InstantFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider /** * INTERNAL API @@ -32,10 +33,10 @@ private[r2dbc] object SqlServerJournalDao { * INTERNAL API */ @InternalApi -private[r2dbc] class SqlServerJournalDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit +private[r2dbc] class SqlServerJournalDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresJournalDao(settings, connectionFactory) { + extends PostgresJournalDao(settings, executorProvider) { import settings.codecSettings.JournalImplicits._ require(settings.useAppTimestamp, "SqlServer requires akka.persistence.r2dbc.use-app-timestamp=on") @@ -45,9 +46,9 @@ private[r2dbc] class SqlServerJournalDao(settings: R2dbcSettings, connectionFact override def log = SqlServerJournalDao.log - override protected val insertEventWithParameterTimestampSql = + override protected def insertEventWithParameterTimestampSql(slice: Int) = sql""" - INSERT INTO $journalTable + INSERT INTO ${journalTable(slice)} (slice, entity_type, persistence_id, seq_nr, writer, adapter_manifest, event_ser_id, event_ser_manifest, event_payload, tags, meta_ser_id, meta_ser_manifest, meta_payload, db_timestamp) OUTPUT inserted.db_timestamp VALUES (@slice, @entityType, @persistenceId, @seqNr, @writer, @adapterManifest, @eventSerId, @eventSerManifest, @eventPayload, @tags, @metaSerId, @metaSerManifest, @metaSerPayload, @dbTimestamp)""" @@ -55,5 +56,6 @@ private[r2dbc] class SqlServerJournalDao(settings: R2dbcSettings, connectionFact override protected def bindTimestampNow(stmt: Statement, getAndIncIndex: () => Int): Statement = stmt.bindTimestamp(getAndIncIndex(), InstantFactory.now()) - override def insertDeleteMarkerSql(timestamp: String): String = super.insertDeleteMarkerSql("?") + override def insertDeleteMarkerSql(slice: Int, timestamp: String): String = + super.insertDeleteMarkerSql(slice, "?") } diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerQueryDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerQueryDao.scala index 7cdb18d8..8256a3c2 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerQueryDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerQueryDao.scala @@ -23,6 +23,8 @@ import io.r2dbc.spi.Statement import org.slf4j.Logger import org.slf4j.LoggerFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + /** * INTERNAL API */ @@ -36,10 +38,10 @@ private[r2dbc] object SqlServerQueryDao { * INTERNAL API */ @InternalApi -private[r2dbc] class SqlServerQueryDao(settings: R2dbcSettings, connectionFactory: 
ConnectionFactory)(implicit +private[r2dbc] class SqlServerQueryDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresQueryDao(settings, connectionFactory) { + extends PostgresQueryDao(settings, executorProvider) { import settings.codecSettings.JournalImplicits._ override def sqlFalse = "0" @@ -48,10 +50,10 @@ private[r2dbc] class SqlServerQueryDao(settings: R2dbcSettings, connectionFactor override def log = SqlServerQueryDao.log override protected def sqlDbTimestamp = "SYSUTCDATETIME()" - override protected val selectEventsSql = + override protected def selectEventsSql(slice: Int) = sql""" SELECT TOP(@limit) slice, entity_type, persistence_id, seq_nr, db_timestamp, SYSUTCDATETIME() AS read_db_timestamp, event_ser_id, event_ser_manifest, event_payload, writer, adapter_manifest, meta_ser_id, meta_ser_manifest, meta_payload, tags - from $journalTable + from ${journalTable(slice)} WHERE persistence_id = @persistenceId AND seq_nr >= @from AND seq_nr <= @to AND deleted = $sqlFalse ORDER BY seq_nr""" @@ -78,7 +80,7 @@ private[r2dbc] class SqlServerQueryDao(settings: R2dbcSettings, connectionFactor sql""" SELECT TOP(@limit) bucket, count(*) as count from (select DATEDIFF(s,'1970-01-01 00:00:00', db_timestamp)/10 as bucket - FROM $journalTable + FROM ${journalTable(minSlice)} WHERE entity_type = @entityType AND ${sliceCondition(minSlice, maxSlice)} AND db_timestamp >= @fromTimestamp AND db_timestamp <= @toTimestamp @@ -124,7 +126,7 @@ private[r2dbc] class SqlServerQueryDao(settings: R2dbcSettings, connectionFactor sql""" $selectColumns - FROM $journalTable + FROM ${journalTable(minSlice)} WHERE entity_type = @entityType AND ${sliceCondition(minSlice, maxSlice)} AND db_timestamp >= @from $toDbTimestampParamCondition $behindCurrentTimeIntervalCondition @@ -145,11 +147,12 @@ private[r2dbc] class SqlServerQueryDao(settings: R2dbcSettings, connectionFactor stmt } - override protected val persistenceIdsForEntityTypeAfterSql: String = + override protected def persistenceIdsForEntityTypeAfterSql(minSlice: Int): String = { sql""" SELECT TOP(@limit) persistence_id FROM ( - SELECT DISTINCT(persistence_id) from $journalTable WHERE persistence_id LIKE @persistenceIdLike AND persistence_id > @persistenceId + SELECT DISTINCT(persistence_id) from ${journalTable(minSlice)} WHERE persistence_id LIKE @persistenceIdLike AND persistence_id > @persistenceId ) as sub ORDER BY persistence_id""" + } override protected def bindPersistenceIdsForEntityTypeAfterSql( stmt: Statement, @@ -163,11 +166,12 @@ private[r2dbc] class SqlServerQueryDao(settings: R2dbcSettings, connectionFactor .bind("@persistenceId", afterPersistenceId) } - override protected val persistenceIdsForEntityTypeSql: String = + override protected def persistenceIdsForEntityTypeSql(minSlice: Int): String = { sql""" SELECT TOP(@limit) persistence_id FROM ( - SELECT DISTINCT(persistence_id) from $journalTable WHERE persistence_id LIKE @persistenceIdLike + SELECT DISTINCT(persistence_id) from ${journalTable(minSlice)} WHERE persistence_id LIKE @persistenceIdLike ) as sub ORDER BY persistence_id""" + } override protected def bindPersistenceIdsForEntityTypeSql( stmt: Statement, @@ -187,15 +191,17 @@ private[r2dbc] class SqlServerQueryDao(settings: R2dbcSettings, connectionFactor .bind("@limit", limit) .bind("@persistenceId", afterPersistenceId) } - override protected val allPersistenceIdsAfterSql: String = + override protected def allPersistenceIdsAfterSql(minSlice: Int): 
String = { sql""" SELECT TOP(@limit) persistence_id FROM ( - SELECT DISTINCT(persistence_id) from $journalTable WHERE persistence_id > @persistenceId + SELECT DISTINCT(persistence_id) from ${journalTable(minSlice)} WHERE persistence_id > @persistenceId ) as sub ORDER BY persistence_id""" + } - override protected val allPersistenceIdsSql: String = - sql"SELECT TOP(@limit) persistence_id FROM (SELECT DISTINCT(persistence_id) from $journalTable) as sub ORDER BY persistence_id" + override protected def allPersistenceIdsSql(minSlice: Int): String = { + sql"SELECT TOP(@limit) persistence_id FROM (SELECT DISTINCT(persistence_id) from ${journalTable(minSlice)}) as sub ORDER BY persistence_id" + } - override def currentDbTimestamp(): Future[Instant] = Future.successful(InstantFactory.now()) + override def currentDbTimestamp(slice: Int): Future[Instant] = Future.successful(InstantFactory.now()) } diff --git a/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerSnapshotDao.scala b/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerSnapshotDao.scala index 451a18bd..4c574ef5 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerSnapshotDao.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/internal/sqlserver/SqlServerSnapshotDao.scala @@ -20,12 +20,12 @@ import akka.persistence.r2dbc.internal.Sql.InterpolationWithAdapter import akka.persistence.r2dbc.internal.codec.TagsCodec.TagsCodecRichStatement import akka.persistence.r2dbc.internal.codec.TimestampCodec.TimestampCodecRichStatement import akka.persistence.r2dbc.internal.postgres.PostgresSnapshotDao -import io.r2dbc.spi.ConnectionFactory import io.r2dbc.spi.Statement import org.slf4j.Logger import org.slf4j.LoggerFactory import akka.persistence.r2dbc.internal.InstantFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider /** * INTERNAL API @@ -39,10 +39,10 @@ private[r2dbc] object SqlServerSnapshotDao { * INTERNAL API */ @InternalApi -private[r2dbc] class SqlServerSnapshotDao(settings: R2dbcSettings, connectionFactory: ConnectionFactory)(implicit +private[r2dbc] class SqlServerSnapshotDao(settings: R2dbcSettings, executorProvider: R2dbcExecutorProvider)(implicit ec: ExecutionContext, system: ActorSystem[_]) - extends PostgresSnapshotDao(settings, connectionFactory) { + extends PostgresSnapshotDao(settings, executorProvider) { import settings.codecSettings.SnapshotImplicits._ override def log: Logger = SqlServerSnapshotDao.log @@ -206,6 +206,6 @@ private[r2dbc] class SqlServerSnapshotDao(settings: R2dbcSettings, connectionFac ORDER BY db_timestamp, seq_nr """ - override def currentDbTimestamp(): Future[Instant] = Future.successful(InstantFactory.now()) + override def currentDbTimestamp(slice: Int): Future[Instant] = Future.successful(InstantFactory.now()) } diff --git a/core/src/main/scala/akka/persistence/r2dbc/journal/R2dbcJournal.scala b/core/src/main/scala/akka/persistence/r2dbc/journal/R2dbcJournal.scala index fd3c9725..5edbe1c1 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/journal/R2dbcJournal.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/journal/R2dbcJournal.scala @@ -27,7 +27,6 @@ import akka.persistence.SerializedEvent import akka.persistence.journal.AsyncWriteJournal import akka.persistence.journal.Tagged import akka.persistence.query.PersistenceQuery -import akka.persistence.r2dbc.ConnectionFactoryProvider import akka.persistence.r2dbc.R2dbcSettings import akka.persistence.r2dbc.internal.InstantFactory import 
akka.persistence.r2dbc.internal.JournalDao @@ -41,6 +40,9 @@ import akka.serialization.SerializationExtension import akka.serialization.Serializers import akka.stream.scaladsl.Sink import com.typesafe.config.Config +import org.slf4j.LoggerFactory + +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider /** * INTERNAL API @@ -89,16 +91,17 @@ private[r2dbc] final class R2dbcJournal(config: Config, cfgPath: String) extends private val sharedConfigPath = cfgPath.replaceAll("""\.journal$""", "") private val serialization: Serialization = SerializationExtension(context.system) - private val journalSettings = R2dbcSettings(context.system.settings.config.getConfig(sharedConfigPath)) - log.debug("R2DBC journal starting up with dialect [{}]", journalSettings.dialectName) + private val settings = R2dbcSettings(context.system.settings.config.getConfig(sharedConfigPath)) + log.debug("R2DBC journal starting up with dialect [{}]", settings.dialectName) - private val journalDao = journalSettings.connectionFactorySettings.dialect.createJournalDao( - journalSettings, - ConnectionFactoryProvider(system).connectionFactoryFor(sharedConfigPath + ".connection-factory")) + private val executorProvider = + new R2dbcExecutorProvider(settings, sharedConfigPath + ".connection-factory", LoggerFactory.getLogger(getClass)) + private val journalDao = + settings.connectionFactorySettings.dialect.createJournalDao(settings, executorProvider) private val query = PersistenceQuery(system).readJournalFor[R2dbcReadJournal](sharedConfigPath + ".query") private val pubSub: Option[PubSub] = - if (journalSettings.journalPublishEvents) Some(PubSub(system)) + if (settings.journalPublishEvents) Some(PubSub(system)) else None // if there are pending writes when an actor restarts we must wait for @@ -111,7 +114,7 @@ private[r2dbc] final class R2dbcJournal(config: Config, cfgPath: String) extends override def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = { def atomicWrite(atomicWrite: AtomicWrite): Future[Instant] = { - val timestamp = if (journalSettings.useAppTimestamp) InstantFactory.now() else JournalDao.EmptyDbTimestamp + val timestamp = if (settings.useAppTimestamp) InstantFactory.now() else JournalDao.EmptyDbTimestamp val serialized: Try[Seq[SerializedJournalRow]] = Try { atomicWrite.payload.map { pr => val (event, tags) = pr.payload match { diff --git a/core/src/main/scala/akka/persistence/r2dbc/query/javadsl/R2dbcReadJournal.scala b/core/src/main/scala/akka/persistence/r2dbc/query/javadsl/R2dbcReadJournal.scala index c12c8efe..342e2692 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/query/javadsl/R2dbcReadJournal.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/query/javadsl/R2dbcReadJournal.scala @@ -92,6 +92,11 @@ final class R2dbcReadJournal(delegate: scaladsl.R2dbcReadJournal) * The stream is not completed when it reaches the end of the currently stored events, but it continues to push new * events when new events are persisted. Corresponding query that is completed when it reaches the end of the * currently stored events is provided by [[R2dbcReadJournal.currentEventsBySlices]]. + * + * The slice range cannot span over more than one data partition, which in practice means that the number of + * Projection instances must be greater than or equal to the number of data partitions. For example, with 4 data + * partitions the slice range (0 - 255) is allowed but not (0 - 511). Smaller slice range such as (0 - 127) is also + * allowed.
*/ override def eventsBySlices[Event]( entityType: String, diff --git a/core/src/main/scala/akka/persistence/r2dbc/query/scaladsl/R2dbcReadJournal.scala b/core/src/main/scala/akka/persistence/r2dbc/query/scaladsl/R2dbcReadJournal.scala index 73f703a9..0c340d1c 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/query/scaladsl/R2dbcReadJournal.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/query/scaladsl/R2dbcReadJournal.scala @@ -39,7 +39,6 @@ import akka.persistence.query.typed.scaladsl.EventsBySliceQuery import akka.persistence.query.typed.scaladsl.EventsBySliceStartingFromSnapshotsQuery import akka.persistence.query.typed.scaladsl.LoadEventQuery import akka.persistence.query.{ EventEnvelope => ClassicEventEnvelope } -import akka.persistence.r2dbc.ConnectionFactoryProvider import akka.persistence.r2dbc.R2dbcSettings import akka.persistence.r2dbc.internal.BySliceQuery import akka.persistence.r2dbc.internal.ContinuousQuery @@ -56,6 +55,8 @@ import akka.stream.scaladsl.Source import com.typesafe.config.Config import org.slf4j.LoggerFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + object R2dbcReadJournal { val Identifier = "akka.persistence.r2dbc.query" @@ -92,12 +93,16 @@ final class R2dbcReadJournal(system: ExtendedActorSystem, config: Config, cfgPat import typedSystem.executionContext private val serialization = SerializationExtension(system) private val persistenceExt = Persistence(system) - private val connectionFactory = ConnectionFactoryProvider(typedSystem) - .connectionFactoryFor(sharedConfigPath + ".connection-factory") + private val executorProvider = + new R2dbcExecutorProvider(settings, sharedConfigPath + ".connection-factory", LoggerFactory.getLogger(getClass))( + typedSystem.executionContext, + typedSystem) + private val journalDao = + settings.connectionFactorySettings.dialect.createJournalDao(settings, executorProvider)(typedSystem) private val queryDao = - settings.connectionFactorySettings.dialect.createQueryDao(settings, connectionFactory)(typedSystem) + settings.connectionFactorySettings.dialect.createQueryDao(settings, executorProvider)(typedSystem) private lazy val snapshotDao = - settings.connectionFactorySettings.dialect.createSnapshotDao(settings, connectionFactory)(typedSystem) + settings.connectionFactorySettings.dialect.createSnapshotDao(settings, executorProvider)(typedSystem) private val filteredPayloadSerId = SerializationExtension(system).findSerializerFor(FilteredPayload).identifier @@ -166,9 +171,6 @@ final class R2dbcReadJournal(system: ExtendedActorSystem, config: Config, cfgPat tags = row.tags) } - private val journalDao = - settings.connectionFactorySettings.dialect.createJournalDao(settings, connectionFactory)(typedSystem) def extractEntityTypeFromPersistenceId(persistenceId: String): String = PersistenceId.extractEntityType(persistenceId) @@ -215,6 +217,11 @@ final class R2dbcReadJournal(system: ExtendedActorSystem, config: Config, cfgPat * The stream is not completed when it reaches the end of the currently stored events, but it continues to push new * events when new events are persisted. Corresponding query that is completed when it reaches the end of the * currently stored events is provided by [[R2dbcReadJournal.currentEventsBySlices]]. + * + * The slice range cannot span over more than one data partition, which in practice means that the number of + * Projection instances must be greater than or equal to the number of data partitions.
For example, with 4 data + * partitions the slice range (0 - 255) is allowed but not (0 - 511). Smaller slice range such as (0 - 127) is also + * allowed. */ override def eventsBySlices[Event]( entityType: String, diff --git a/core/src/main/scala/akka/persistence/r2dbc/session/scaladsl/R2dbcSession.scala b/core/src/main/scala/akka/persistence/r2dbc/session/scaladsl/R2dbcSession.scala index af6c1ef0..f15fbfab 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/session/scaladsl/R2dbcSession.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/session/scaladsl/R2dbcSession.scala @@ -41,6 +41,7 @@ object R2dbcSession { .connectionFactorySettingsFor(connectionFactoryConfigPath) .poolSettings .closeCallsExceeding + // FIXME support data partition? val r2dbcExecutor = new R2dbcExecutor(connectionFactory, log, logDbCallsDisabled, closeCallsExceeding)( system.executionContext, diff --git a/core/src/main/scala/akka/persistence/r2dbc/snapshot/R2dbcSnapshotStore.scala b/core/src/main/scala/akka/persistence/r2dbc/snapshot/R2dbcSnapshotStore.scala index 574fdb36..6bcce1e2 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/snapshot/R2dbcSnapshotStore.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/snapshot/R2dbcSnapshotStore.scala @@ -9,17 +9,18 @@ import java.time.Instant import akka.actor.typed.ActorSystem import akka.actor.typed.scaladsl.adapter._ import akka.persistence.{ SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria } -import akka.persistence.r2dbc.{ ConnectionFactoryProvider, R2dbcSettings } +import akka.persistence.r2dbc.R2dbcSettings import akka.persistence.snapshot.SnapshotStore import akka.serialization.{ Serialization, SerializationExtension } import com.typesafe.config.Config import scala.concurrent.{ ExecutionContext, Future } +import org.slf4j.LoggerFactory + import akka.annotation.InternalApi import akka.persistence.Persistence -import akka.persistence.query.typed.EventEnvelope import akka.persistence.r2dbc.internal.JournalDao -import akka.persistence.r2dbc.internal.SnapshotDao +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider import akka.persistence.r2dbc.internal.SnapshotDao.SerializedSnapshotMetadata import akka.persistence.r2dbc.internal.SnapshotDao.SerializedSnapshotRow import akka.persistence.typed.PersistenceId @@ -58,10 +59,10 @@ private[r2dbc] final class R2dbcSnapshotStore(cfg: Config, cfgPath: String) exte val settings = R2dbcSettings(context.system.settings.config.getConfig(sharedConfigPath)) log.debug("R2DBC snapshot store starting up with dialect [{}]", settings.dialectName) - private val connectionFactory = - ConnectionFactoryProvider(system).connectionFactoryFor(sharedConfigPath + ".connection-factory") - private val dao = settings.connectionFactorySettings.dialect.createSnapshotDao(settings, connectionFactory) - private val queryDao = settings.connectionFactorySettings.dialect.createQueryDao(settings, connectionFactory) + private val executorProvider = + new R2dbcExecutorProvider(settings, sharedConfigPath + ".connection-factory", LoggerFactory.getLogger(getClass)) + private val dao = settings.connectionFactorySettings.dialect.createSnapshotDao(settings, executorProvider) + private val queryDao = settings.connectionFactorySettings.dialect.createQueryDao(settings, executorProvider) def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = dao diff --git a/core/src/main/scala/akka/persistence/r2dbc/state/scaladsl/R2dbcDurableStateStore.scala 
b/core/src/main/scala/akka/persistence/r2dbc/state/scaladsl/R2dbcDurableStateStore.scala index be7656e5..c8473758 100644 --- a/core/src/main/scala/akka/persistence/r2dbc/state/scaladsl/R2dbcDurableStateStore.scala +++ b/core/src/main/scala/akka/persistence/r2dbc/state/scaladsl/R2dbcDurableStateStore.scala @@ -26,7 +26,6 @@ import akka.persistence.query.UpdatedDurableState import akka.persistence.query.scaladsl.DurableStateStorePagedPersistenceIdsQuery import akka.persistence.query.typed.EventEnvelope import akka.persistence.query.typed.scaladsl.DurableStateStoreBySliceQuery -import akka.persistence.r2dbc.ConnectionFactoryProvider import akka.persistence.r2dbc.R2dbcSettings import akka.persistence.r2dbc.internal.BySliceQuery import akka.persistence.r2dbc.internal.ContinuousQuery @@ -46,6 +45,8 @@ import akka.stream.scaladsl.Source import com.typesafe.config.Config import org.slf4j.LoggerFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + object R2dbcDurableStateStore { val Identifier = "akka.persistence.r2dbc.state" @@ -65,20 +66,22 @@ class R2dbcDurableStateStore[A](system: ExtendedActorSystem, config: Config, cfg private val log = LoggerFactory.getLogger(getClass) private val sharedConfigPath = cfgPath.replaceAll("""\.state$""", "") private val settings = R2dbcSettings(system.settings.config.getConfig(sharedConfigPath)) - private val journalSettings = R2dbcSettings(system.settings.config.getConfig(sharedConfigPath)) log.debug("R2DBC journal starting up with dialect [{}]", settings.dialectName) private val typedSystem = system.toTyped private val serialization = SerializationExtension(system) private val persistenceExt = Persistence(system) - private val stateDao = settings.connectionFactorySettings.dialect.createDurableStateDao( - settings, - ConnectionFactoryProvider(typedSystem) - .connectionFactoryFor(sharedConfigPath + ".connection-factory"))(typedSystem) + // FIXME maybe this is using the wrong executionContext, H2Dialect is using another dispatcher? 
+ private val executorProvider = + new R2dbcExecutorProvider(settings, sharedConfigPath + ".connection-factory", LoggerFactory.getLogger(getClass))( + typedSystem.executionContext, + typedSystem) + private val stateDao = + settings.connectionFactorySettings.dialect.createDurableStateDao(settings, executorProvider)(typedSystem) private val changeEventWriterUuid = UUID.randomUUID().toString private val pubSub: Option[PubSub] = - if (journalSettings.journalPublishEvents) Some(PubSub(typedSystem)) + if (settings.journalPublishEvents) Some(PubSub(typedSystem)) else None private val bySlice: BySliceQuery[SerializedStateRow, DurableStateChange[A]] = { @@ -225,7 +228,7 @@ class R2dbcDurableStateStore[A](system: ExtendedActorSystem, config: Config, cfg val entityType = PersistenceId.extractEntityType(persistenceId) val slice = persistenceExt.sliceForPersistenceId(persistenceId) - val timestamp = if (journalSettings.useAppTimestamp) InstantFactory.now() else JournalDao.EmptyDbTimestamp + val timestamp = if (settings.useAppTimestamp) InstantFactory.now() else JournalDao.EmptyDbTimestamp SerializedJournalRow( slice, diff --git a/core/src/test/resources/application-postgres-data-partitions.conf b/core/src/test/resources/application-postgres-data-partitions.conf new file mode 100644 index 00000000..d95dc9f5 --- /dev/null +++ b/core/src/test/resources/application-postgres-data-partitions.conf @@ -0,0 +1,12 @@ +# used from CI testing of 4 data partitions with 2 databases + +akka.persistence.r2dbc.data-partition { + number-of-partitions = 4 + number-of-databases = 2 +} + +akka.persistence.r2dbc.connection-factory = ${akka.persistence.r2dbc.postgres} +akka.persistence.r2dbc.connection-factory-0-1 = ${akka.persistence.r2dbc.connection-factory} +akka.persistence.r2dbc.connection-factory-2-3 = ${akka.persistence.r2dbc.connection-factory} +# second db listening on different port +akka.persistence.r2dbc.connection-factory-2-3.port = 5433 diff --git a/core/src/test/scala/akka/persistence/r2dbc/PayloadSpec.scala b/core/src/test/scala/akka/persistence/r2dbc/PayloadSpec.scala index 955dc181..5a19f71b 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/PayloadSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/PayloadSpec.scala @@ -89,12 +89,13 @@ class PayloadSpec private def selectJournalRow(persistenceId: String): TestRow = { import settings.codecSettings.JournalImplicits.journalPayloadCodec + val slice = persistenceExt.sliceForPersistenceId(persistenceId) - r2dbcExecutor + r2dbcExecutor(slice) .selectOne[TestRow]("test")( connection => connection.createStatement( - s"select * from ${settings.journalTableWithSchema} where persistence_id = '$persistenceId'"), + s"select * from ${settings.journalTableWithSchema(slice)} where persistence_id = '$persistenceId'"), row => { val payload = row.getPayload("event_payload") TestRow( diff --git a/core/src/test/scala/akka/persistence/r2dbc/R2dbcSettingsSpec.scala b/core/src/test/scala/akka/persistence/r2dbc/R2dbcSettingsSpec.scala index c57171b5..e3171233 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/R2dbcSettingsSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/R2dbcSettingsSpec.scala @@ -4,6 +4,8 @@ package akka.persistence.r2dbc +import com.typesafe.config.ConfigException + import akka.persistence.r2dbc.internal.postgres.PostgresDialect.PostgresConnectionFactorySettings import com.typesafe.config.{ Config, ConfigFactory } import io.r2dbc.postgresql.client.SSLMode @@ -19,10 +21,12 @@ class R2dbcSettingsSpec extends AnyWordSpec with TestSuite 
with Matchers { "Settings for postgres" should { "have table names with schema" in { val config = ConfigFactory - .parseString("akka.persistence.r2dbc.schema=s1") + .parseString(""" + akka.persistence.r2dbc.schema=s1 + """) .withFallback(ConfigFactory.load("application-postgres.conf")) val settings = R2dbcSettings(config.getConfig("akka.persistence.r2dbc")) - settings.journalTableWithSchema shouldBe "s1.event_journal" + settings.journalTableWithSchema(0) shouldBe "s1.event_journal" settings.snapshotsTableWithSchema shouldBe "s1.snapshot" settings.durableStateTableWithSchema shouldBe "s1.durable_state" @@ -64,4 +68,211 @@ class R2dbcSettingsSpec extends AnyWordSpec with TestSuite with Matchers { SSLMode.fromValue(connectionFactorySettings.sslMode) shouldBe SSLMode.VERIFY_FULL } } + + "data-partition settings" should { + "have no data partitions by default" in { + val config = ConfigFactory.load("application-postgres.conf") + val settings = R2dbcSettings(config.getConfig("akka.persistence.r2dbc")) + settings.numberOfDataPartitions shouldBe 1 + settings.numberOfDatabases shouldBe 1 + settings.dataPartitionSliceRanges.size shouldBe 1 + settings.dataPartitionSliceRanges.head shouldBe (0 until 1024) + settings.connectionFactorSliceRanges.size shouldBe 1 + settings.connectionFactorSliceRanges.head shouldBe (0 until 1024) + } + + "report invalid values" in { + val baseConfig = ConfigFactory.load("application-postgres.conf") + def settingsWith(numberOfPartitions: Int, numberOfDatabases: Int = 1): R2dbcSettings = { + val config = ConfigFactory + .parseString(s""" + akka.persistence.r2dbc.data-partition { + number-of-partitions = $numberOfPartitions + number-of-databases = $numberOfDatabases + } + """) + .withFallback(baseConfig) + R2dbcSettings(config.getConfig("akka.persistence.r2dbc")) + } + + intercept[IllegalArgumentException](settingsWith(numberOfPartitions = 0)) + intercept[IllegalArgumentException](settingsWith(numberOfPartitions = 1025)) + intercept[IllegalArgumentException](settingsWith(numberOfPartitions = 6)) + + intercept[IllegalArgumentException](settingsWith(numberOfPartitions = 8, numberOfDatabases = 0)) + intercept[IllegalArgumentException](settingsWith(numberOfPartitions = 8, numberOfDatabases = 1025)) + intercept[IllegalArgumentException](settingsWith(numberOfPartitions = 8, numberOfDatabases = 6)) + intercept[IllegalArgumentException](settingsWith(numberOfPartitions = 8, numberOfDatabases = 16)) + + intercept[ConfigException.Missing](settingsWith(numberOfPartitions = 8, numberOfDatabases = 2)) + } + + "result in table names with data partition suffix" in { + val config = ConfigFactory + .parseString(""" + akka.persistence.r2dbc.schema=s1 + akka.persistence.r2dbc.data-partition.number-of-partitions = 4 + """) + .withFallback(ConfigFactory.load("application-postgres.conf")) + val settings = R2dbcSettings(config.getConfig("akka.persistence.r2dbc")) + settings.journalTableWithSchema(slice = 0) shouldBe "s1.event_journal_0" + settings.journalTableWithSchema(slice = 17) shouldBe "s1.event_journal_0" + settings.journalTableWithSchema(slice = 256) shouldBe "s1.event_journal_1" + settings.journalTableWithSchema(slice = 511) shouldBe "s1.event_journal_1" + settings.journalTableWithSchema(slice = 512) shouldBe "s1.event_journal_2" + settings.journalTableWithSchema(slice = 767) shouldBe "s1.event_journal_2" + settings.journalTableWithSchema(slice = 768) shouldBe "s1.event_journal_3" + settings.journalTableWithSchema(slice = 1023) shouldBe "s1.event_journal_3" + } + + "verify slice range 
within same data partition" in { + val config = ConfigFactory + .parseString(""" + akka.persistence.r2dbc.data-partition.number-of-partitions = 4 + """) + .withFallback(ConfigFactory.load("application-postgres.conf")) + val settings = R2dbcSettings(config.getConfig("akka.persistence.r2dbc")) + settings.isSliceRangeWithinSameDataPartition(0, 255) shouldBe true + settings.isSliceRangeWithinSameDataPartition(256, 511) shouldBe true + settings.isSliceRangeWithinSameDataPartition(512, 767) shouldBe true + settings.isSliceRangeWithinSameDataPartition(768, 1023) shouldBe true + + settings.isSliceRangeWithinSameDataPartition(0, 1023) shouldBe false + settings.isSliceRangeWithinSameDataPartition(0, 511) shouldBe false + settings.isSliceRangeWithinSameDataPartition(512, 1023) shouldBe false + settings.isSliceRangeWithinSameDataPartition(511, 512) shouldBe false + + settings.dataPartitionSliceRanges.size shouldBe 4 + settings.dataPartitionSliceRanges(0) should be(0 until 256) + settings.dataPartitionSliceRanges(1) should be(256 until 512) + settings.dataPartitionSliceRanges(2) should be(512 until 768) + settings.dataPartitionSliceRanges(3) should be(768 until 1024) + } + + "use connection-factory per database when same number of databases as partitions" in { + val config = ConfigFactory + .parseString(""" + akka.persistence.r2dbc.data-partition { + number-of-partitions = 2 + number-of-databases = 2 + } + akka.persistence.r2dbc.connection-factory-0-0 = ${akka.persistence.r2dbc.postgres} + akka.persistence.r2dbc.connection-factory-0-0.host = hostA + akka.persistence.r2dbc.connection-factory-1-1 = ${akka.persistence.r2dbc.postgres} + akka.persistence.r2dbc.connection-factory-1-1.host = hostB + + # FIXME maybe we should support a convenience syntax for this case: + # akka.persistence.r2dbc.connection-factory-0 = ${akka.persistence.r2dbc.postgres} + # akka.persistence.r2dbc.connection-factory-1 = ${akka.persistence.r2dbc.postgres} + """) + .withFallback(ConfigFactory.load("application-postgres.conf")) + .resolve() + val settings = R2dbcSettings(config.getConfig("akka.persistence.r2dbc")) + settings.connectionFactorySettings(slice = 0).config.getString("host") shouldBe "hostA" + settings.connectionFactorySettings(slice = 17).config.getString("host") shouldBe "hostA" + settings.connectionFactorySettings(slice = 511).config.getString("host") shouldBe "hostA" + settings.connectionFactorySettings(slice = 512).config.getString("host") shouldBe "hostB" + settings.connectionFactorySettings(slice = 700).config.getString("host") shouldBe "hostB" + settings.connectionFactorySettings(slice = 1023).config.getString("host") shouldBe "hostB" + + settings.connectionFactorSliceRanges.size shouldBe 2 + settings.connectionFactorSliceRanges(0) should be(0 until 512) + settings.connectionFactorSliceRanges(1) should be(512 until 1024) + } + + "use connection-factory per database when less databases than partitions" in { + val config = ConfigFactory + .parseString(""" + akka.persistence.r2dbc.data-partition { + number-of-partitions = 8 + number-of-databases = 2 + } + akka.persistence.r2dbc.connection-factory-0-3 = ${akka.persistence.r2dbc.postgres} + akka.persistence.r2dbc.connection-factory-0-3.host = hostA + akka.persistence.r2dbc.connection-factory-4-7 = ${akka.persistence.r2dbc.postgres} + akka.persistence.r2dbc.connection-factory-4-7.host = hostB + """) + .withFallback(ConfigFactory.load("application-postgres.conf")) + .resolve() + val settings = R2dbcSettings(config.getConfig("akka.persistence.r2dbc")) + 
settings.connectionFactorySettings(slice = 0).config.getString("host") shouldBe "hostA" + settings.connectionFactorySettings(slice = 17).config.getString("host") shouldBe "hostA" + settings.connectionFactorySettings(slice = 511).config.getString("host") shouldBe "hostA" + settings.connectionFactorySettings(slice = 512).config.getString("host") shouldBe "hostB" + settings.connectionFactorySettings(slice = 700).config.getString("host") shouldBe "hostB" + settings.connectionFactorySettings(slice = 1023).config.getString("host") shouldBe "hostB" + + settings.connectionFactorSliceRanges.size shouldBe 2 + settings.connectionFactorSliceRanges(0) should be(0 until 512) + settings.connectionFactorSliceRanges(1) should be(512 until 1024) + } + + "derive connection-factory config property from number of partitions and databases" in { + val config = ConfigFactory + .parseString(""" + akka.persistence.r2dbc.data-partition { + number-of-partitions = 8 + number-of-databases = 2 + } + akka.persistence.r2dbc.connection-factory-0-3 = ${akka.persistence.r2dbc.postgres} + akka.persistence.r2dbc.connection-factory-0-3.host = hostA + akka.persistence.r2dbc.connection-factory-4-7 = ${akka.persistence.r2dbc.postgres} + akka.persistence.r2dbc.connection-factory-4-7.host = hostB + """) + .withFallback(ConfigFactory.load("application-postgres.conf")) + .resolve() + val settings = R2dbcSettings(config.getConfig("akka.persistence.r2dbc")) + settings.resolveConnectionFactoryConfigPath( + "a.b.connection-factory", + slice = 0) shouldBe "a.b.connection-factory-0-3" + settings.resolveConnectionFactoryConfigPath( + "a.b.connection-factory", + slice = 17) shouldBe "a.b.connection-factory-0-3" + settings.resolveConnectionFactoryConfigPath( + "a.b.connection-factory", + slice = 511) shouldBe "a.b.connection-factory-0-3" + settings.resolveConnectionFactoryConfigPath( + "a.b.connection-factory", + slice = 512) shouldBe "a.b.connection-factory-4-7" + settings.resolveConnectionFactoryConfigPath( + "a.b.connection-factory", + slice = 700) shouldBe "a.b.connection-factory-4-7" + settings.resolveConnectionFactoryConfigPath( + "a.b.connection-factory", + slice = 1023) shouldBe "a.b.connection-factory-4-7" + + settings.connectionFactorSliceRanges.size shouldBe 2 + settings.connectionFactorSliceRanges(0) should be(0 until 512) + settings.connectionFactorSliceRanges(1) should be(512 until 1024) + + val configPaths = + R2dbcSettings.connectionFactoryConfigPaths( + "a.b.connection-factory", + numberOfDataPartitions = 8, + numberOfDatabases = 2) + configPaths.size shouldBe 2 + configPaths(0) shouldBe "a.b.connection-factory-0-3" + configPaths(1) shouldBe "a.b.connection-factory-4-7" + } + + "use default connection-factory config property when one database" in { + val config = ConfigFactory + .parseString(""" + akka.persistence.r2dbc.data-partition { + number-of-partitions = 8 + number-of-databases = 1 + } + """) + .withFallback(ConfigFactory.load("application-postgres.conf")) + val settings = R2dbcSettings(config.getConfig("akka.persistence.r2dbc")) + settings.resolveConnectionFactoryConfigPath("a.b.connection-factory", slice = 0) shouldBe "a.b.connection-factory" + settings.resolveConnectionFactoryConfigPath( + "a.b.connection-factory", + slice = 1023) shouldBe "a.b.connection-factory" + + settings.connectionFactorSliceRanges.size shouldBe 1 + settings.connectionFactorSliceRanges(0) should be(0 until 1024) + } + + } } diff --git a/core/src/test/scala/akka/persistence/r2dbc/TestDbLifecycle.scala 
b/core/src/test/scala/akka/persistence/r2dbc/TestDbLifecycle.scala index bc88637a..429e1680 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/TestDbLifecycle.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/TestDbLifecycle.scala @@ -6,18 +6,22 @@ package akka.persistence.r2dbc import scala.concurrent.Await import scala.concurrent.duration._ + import akka.actor.typed.ActorSystem import akka.persistence.Persistence import akka.persistence.r2dbc.internal.R2dbcExecutor import org.scalatest.BeforeAndAfterAll import org.scalatest.Suite import org.slf4j.LoggerFactory + import akka.persistence.r2dbc.internal.Sql.InterpolationWithAdapter import akka.persistence.r2dbc.internal.h2.H2Dialect - import java.time.Instant + import scala.util.control.NonFatal +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + trait TestDbLifecycle extends BeforeAndAfterAll { this: Suite => def typedSystem: ActorSystem[_] @@ -27,25 +31,31 @@ trait TestDbLifecycle extends BeforeAndAfterAll { this: Suite => lazy val r2dbcSettings: R2dbcSettings = R2dbcSettings(typedSystem.settings.config.getConfig(testConfigPath)) - lazy val r2dbcExecutor: R2dbcExecutor = { - new R2dbcExecutor( - ConnectionFactoryProvider(typedSystem) - .connectionFactoryFor(testConfigPath + ".connection-factory"), - LoggerFactory.getLogger(getClass), - r2dbcSettings.logDbCallsExceeding, - r2dbcSettings.connectionFactorySettings.poolSettings.closeCallsExceeding)( + lazy val r2dbcExecutorProvider: R2dbcExecutorProvider = + new R2dbcExecutorProvider(r2dbcSettings, testConfigPath + ".connection-factory", LoggerFactory.getLogger(getClass))( typedSystem.executionContext, typedSystem) - } + + def r2dbcExecutor(slice: Int): R2dbcExecutor = + r2dbcExecutorProvider.executorFor(slice) + + // FIXME maybe remove, and always use the r2dbcExecutorProvider with explicit slice + lazy val r2dbcExecutor: R2dbcExecutor = + r2dbcExecutor(slice = 0) lazy val persistenceExt: Persistence = Persistence(typedSystem) + def pendingIfMoreThanOneDataPartition(): Unit = + if (r2dbcSettings.numberOfDataPartitions > 1) + pending + override protected def beforeAll(): Unit = { try { - Await.result( - r2dbcExecutor.updateOne("beforeAll delete")( - _.createStatement(s"delete from ${r2dbcSettings.journalTableWithSchema}")), - 10.seconds) + r2dbcSettings.allJournalTablesWithSchema.foreach { case (table, minSlice) => + Await.result( + r2dbcExecutor(minSlice).updateOne("beforeAll delete")(_.createStatement(s"delete from $table")), + 10.seconds) + } Await.result( r2dbcExecutor.updateOne("beforeAll delete")( _.createStatement(s"delete from ${r2dbcSettings.snapshotsTableWithSchema}")), diff --git a/core/src/test/scala/akka/persistence/r2dbc/cleanup/scaladsl/EventSourcedCleanupSpec.scala b/core/src/test/scala/akka/persistence/r2dbc/cleanup/scaladsl/EventSourcedCleanupSpec.scala index c5e60b2c..e4099582 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/cleanup/scaladsl/EventSourcedCleanupSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/cleanup/scaladsl/EventSourcedCleanupSpec.scala @@ -4,6 +4,8 @@ package akka.persistence.r2dbc.cleanup.scaladsl +import java.util.UUID + import scala.concurrent.duration._ import akka.Done @@ -51,6 +53,19 @@ class EventSourcedCleanupSpec override def typedSystem: ActorSystem[_] = system + // find two different persistenceIds that are both in the slice range 0-255 so that this test can run with + // 4 data partitions + private def pidsWithSliceLessThan256(entityType: String) = { + var pid1: PersistenceId = null + var pid2: 
PersistenceId = null + while (pid1 == pid2 || persistenceExt .sliceForPersistenceId(pid1.id) > 255 || persistenceExt + .sliceForPersistenceId(pid2.id) > 255) { + pid1 = PersistenceId(entityType, UUID.randomUUID().toString) + pid2 = PersistenceId(entityType, UUID.randomUUID().toString) + } + (pid1, pid2) + } + "EventSourcedCleanup" must { "delete events for one persistenceId" in { val ackProbe = createTestProbe[Done]() @@ -377,9 +392,8 @@ class EventSourcedCleanupSpec "delete events for slice before timestamp" in { val ackProbe = createTestProbe[Done]() val entityType = nextEntityType() - val pid1 = PersistenceId(entityType, "a") - val pid2 = PersistenceId(entityType, "b") - persistenceExt.sliceForPersistenceId(pid1.id) should not be persistenceExt.sliceForPersistenceId(pid2.id) + + val (pid1, pid2) = pidsWithSliceLessThan256(entityType) val p1 = spawn(Persister(pid1)) val p2 = spawn(Persister(pid2)) @@ -398,7 +412,7 @@ class EventSourcedCleanupSpec PersistenceQuery(system).readJournalFor[CurrentEventsBySliceQuery](R2dbcReadJournal.Identifier) val eventsBefore = journalQuery - .currentEventsBySlices[Any](entityType, 0, persistenceExt.numberOfSlices - 1, Offset.noOffset) + .currentEventsBySlices[Any](entityType, 0, 255, Offset.noOffset) .runWith(Sink.seq) .futureValue eventsBefore.size shouldBe 10 @@ -412,7 +426,7 @@ class EventSourcedCleanupSpec val eventsAfter = journalQuery - .currentEventsBySlices[Any](entityType, 0, persistenceExt.numberOfSlices - 1, Offset.noOffset) + .currentEventsBySlices[Any](entityType, 0, 255, Offset.noOffset) .runWith(Sink.seq) .futureValue eventsAfter.count(_.persistenceId == pid1.id) shouldBe 5 diff --git a/core/src/test/scala/akka/persistence/r2dbc/internal/H2AdditionalInitForSchemaSpec.scala b/core/src/test/scala/akka/persistence/r2dbc/internal/H2AdditionalInitForSchemaSpec.scala index b35c4e08..d3e9f6d6 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/internal/H2AdditionalInitForSchemaSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/internal/H2AdditionalInitForSchemaSpec.scala @@ -32,6 +32,9 @@ object H2AdditionalInitForSchemaSpec { additional-init = "alter table durable_state add if not exists col1 varchar(256)" } // #additionalColumn + + # when testing with number-of-databases > 1 we must override that for H2 + akka.persistence.r2dbc.data-partition.number-of-databases = 1 """) .withFallback(ConfigFactory.load()) .resolve() diff --git a/core/src/test/scala/akka/persistence/r2dbc/internal/R2dbcExecutorSpec.scala b/core/src/test/scala/akka/persistence/r2dbc/internal/R2dbcExecutorSpec.scala index 2dfa31a2..28c3d22f 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/internal/R2dbcExecutorSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/internal/R2dbcExecutorSpec.scala @@ -13,7 +13,6 @@ import akka.actor.typed.ActorSystem import akka.persistence.r2dbc.TestConfig import akka.persistence.r2dbc.TestData import akka.persistence.r2dbc.TestDbLifecycle -import akka.persistence.r2dbc.internal.h2.H2Dialect import akka.persistence.r2dbc.internal.postgres.PostgresDialect import akka.persistence.r2dbc.internal.postgres.YugabyteDialect import com.typesafe.config.Config @@ -73,6 +72,8 @@ class R2dbcExecutorSpec } "R2dbcExecutor" should { + // when number-of-databases > 1 the test config above will not be used + pendingIfMoreThanOneDataPartition() "close connection when no response from update" in { pendingIfCannotBeTestedWithDialect() diff --git a/core/src/test/scala/akka/persistence/r2dbc/journal/PersistSerializedEventSpec.scala
b/core/src/test/scala/akka/persistence/r2dbc/journal/PersistSerializedEventSpec.scala index 303fa0d3..08c10762 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/journal/PersistSerializedEventSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/journal/PersistSerializedEventSpec.scala @@ -38,6 +38,7 @@ class PersistSerializedEventSpec val entityType = nextEntityType() val persistenceId = PersistenceId.ofUniqueId(nextPid(entityType)) + val slice = persistenceExt.sliceForPersistenceId(persistenceId.id) val ref = spawn(Persister(persistenceId, Set.empty)) // String serialization has no manifest @@ -69,11 +70,11 @@ class PersistSerializedEventSpec replyProbe.expectMessage("e1|Some(e2)") val rows = - r2dbcExecutor + r2dbcExecutor(slice) .select[Row]("test")( connection => connection.createStatement( - s"select * from ${settings.journalTableWithSchema} where persistence_id = '${persistenceId.id}'"), + s"select * from ${settings.journalTableWithSchema(slice)} where persistence_id = '${persistenceId.id}'"), row => { Row( pid = row.get("persistence_id", classOf[String]), diff --git a/core/src/test/scala/akka/persistence/r2dbc/journal/PersistTagsSpec.scala b/core/src/test/scala/akka/persistence/r2dbc/journal/PersistTagsSpec.scala index 6bbba9ad..5351a7d3 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/journal/PersistTagsSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/journal/PersistTagsSpec.scala @@ -31,6 +31,23 @@ class PersistTagsSpec import settings.codecSettings.JournalImplicits.tagsCodec case class Row(pid: String, seqNr: Long, tags: Set[String]) + private def selectRows(table: String, minSlice: Int): IndexedSeq[Row] = { + r2dbcExecutor(minSlice) + .select[Row]("test")( + connection => connection.createStatement(s"select * from $table"), + row => + Row( + pid = row.get("persistence_id", classOf[String]), + seqNr = row.get[java.lang.Long]("seq_nr", classOf[java.lang.Long]), + row.getTags("tags"))) + .futureValue + } + + private def selectAllRows(): IndexedSeq[Row] = + r2dbcSettings.allJournalTablesWithSchema.toVector.sortBy(_._1).flatMap { case (table, minSlice) => + selectRows(table, minSlice) + } + "Persist tags" should { "be the same for events stored in same transaction" in { @@ -53,16 +70,7 @@ class PersistTagsSpec } pingProbe.receiveMessages(entities.size, 20.seconds) - val rows = - r2dbcExecutor - .select[Row]("test")( - connection => connection.createStatement(s"select * from ${settings.journalTableWithSchema}"), - row => - Row( - pid = row.get("persistence_id", classOf[String]), - seqNr = row.get[java.lang.Long]("seq_nr", classOf[java.lang.Long]), - row.getTags("tags"))) - .futureValue + val rows = selectAllRows() rows.foreach { case Row(pid, _, tags) => withClue(s"pid [$pid}]: ") { diff --git a/core/src/test/scala/akka/persistence/r2dbc/journal/PersistTimestampSpec.scala b/core/src/test/scala/akka/persistence/r2dbc/journal/PersistTimestampSpec.scala index 6f356555..bacf5cf5 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/journal/PersistTimestampSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/journal/PersistTimestampSpec.scala @@ -42,6 +42,32 @@ class PersistTimestampSpec else PostgresTimestampCodec + private def selectRows(table: String, minSlice: Int): IndexedSeq[Row] = { + r2dbcExecutor(minSlice) + .select[Row]("test")( + connection => connection.createStatement(s"select * from $table"), + row => { + val event = serialization + .deserialize( + row.getPayload("event_payload"), + row.get("event_ser_id", classOf[Integer]), + 
row.get("event_ser_manifest", classOf[String])) + .get + .asInstanceOf[String] + Row( + pid = row.get("persistence_id", classOf[String]), + seqNr = row.get[java.lang.Long]("seq_nr", classOf[java.lang.Long]), + dbTimestamp = row.getTimestamp("db_timestamp"), + event) + }) + .futureValue + } + + private def selectAllRows(): IndexedSeq[Row] = + r2dbcSettings.allJournalTablesWithSchema.toVector.sortBy(_._1).flatMap { case (table, minSlice) => + selectRows(table, minSlice) + } + "Persist timestamp" should { "be the same for events stored in same transaction" in { @@ -71,25 +97,7 @@ class PersistTimestampSpec } pingProbe.receiveMessages(entities.size, 20.seconds) - val rows = - r2dbcExecutor - .select[Row]("test")( - connection => connection.createStatement(s"select * from ${settings.journalTableWithSchema}"), - row => { - val event = serialization - .deserialize( - row.getPayload("event_payload"), - row.get("event_ser_id", classOf[Integer]), - row.get("event_ser_manifest", classOf[String])) - .get - .asInstanceOf[String] - Row( - pid = row.get("persistence_id", classOf[String]), - seqNr = row.get[java.lang.Long]("seq_nr", classOf[java.lang.Long]), - dbTimestamp = row.getTimestamp("db_timestamp"), - event) - }) - .futureValue + val rows = selectAllRows() rows.groupBy(_.event).foreach { case (_, rowsByUniqueEvent) => withClue(s"pid [${rowsByUniqueEvent.head.pid}]: ") { diff --git a/core/src/test/scala/akka/persistence/r2dbc/query/CurrentPersistenceIdsQuerySpec.scala b/core/src/test/scala/akka/persistence/r2dbc/query/CurrentPersistenceIdsQuerySpec.scala index 1c70ee62..37aaa1f4 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/query/CurrentPersistenceIdsQuerySpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/query/CurrentPersistenceIdsQuerySpec.scala @@ -63,6 +63,7 @@ class CurrentPersistenceIdsQuerySpec } "Event Sourced currentPersistenceIds" should { + "retrieve all ids" in { val result = query.currentPersistenceIds().runWith(Sink.seq).futureValue result shouldBe pids.map(_.id) diff --git a/core/src/test/scala/akka/persistence/r2dbc/query/EventsBySliceBacktrackingSpec.scala b/core/src/test/scala/akka/persistence/r2dbc/query/EventsBySliceBacktrackingSpec.scala index 551a03a6..3ccabfef 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/query/EventsBySliceBacktrackingSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/query/EventsBySliceBacktrackingSpec.scala @@ -67,12 +67,12 @@ class EventsBySliceBacktrackingSpec private def writeEvent(slice: Int, persistenceId: String, seqNr: Long, timestamp: Instant, event: String): Unit = { log.debugN("Write test event [{}] [{}] [{}] at time [{}]", persistenceId, seqNr, event, timestamp) val insertEventSql = sql""" - INSERT INTO ${settings.journalTableWithSchema} + INSERT INTO ${settings.journalTableWithSchema(slice)} (slice, entity_type, persistence_id, seq_nr, db_timestamp, writer, adapter_manifest, event_ser_id, event_ser_manifest, event_payload) VALUES (?, ?, ?, ?, ?, '', '', ?, '', ?)""" val entityType = PersistenceId.extractEntityType(persistenceId) - val result = r2dbcExecutor.updateOne("test writeEvent") { connection => + val result = r2dbcExecutor(slice).updateOne("test writeEvent") { connection => connection .createStatement(insertEventSql) .bind(0, slice) @@ -89,6 +89,8 @@ class EventsBySliceBacktrackingSpec "eventsBySlices backtracking" should { "find old events with earlier timestamp" in { + pendingIfMoreThanOneDataPartition() + // this scenario is handled by the backtracking query val entityType = nextEntityType() val 
pid1 = nextPid(entityType) @@ -179,6 +181,8 @@ class EventsBySliceBacktrackingSpec } "emit from backtracking after first normal query" in { + pendingIfMoreThanOneDataPartition() + val entityType = nextEntityType() val pid1 = nextPid(entityType) val pid2 = nextPid(entityType) @@ -237,6 +241,8 @@ class EventsBySliceBacktrackingSpec } "predict backtracking filtered events based on latest seen counts" in { + pendingIfMoreThanOneDataPartition() + val entityType = nextEntityType() val pid = nextPid(entityType) val slice = query.sliceForPersistenceId(pid) diff --git a/core/src/test/scala/akka/persistence/r2dbc/query/EventsBySlicePerfSpec.scala b/core/src/test/scala/akka/persistence/r2dbc/query/EventsBySlicePerfSpec.scala index ffc1aef3..de083e1a 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/query/EventsBySlicePerfSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/query/EventsBySlicePerfSpec.scala @@ -131,6 +131,8 @@ class EventsBySlicePerfSpec } "write and read concurrently" in { + pendingIfMoreThanOneDataPartition() + // increase these properties for "real" testing // also, remove LogCapturing and change logback log levels for "real" testing val numberOfEventsPerWriter = 20 diff --git a/core/src/test/scala/akka/persistence/r2dbc/state/DurableStateUpdateWithChangeEventStoreSpec.scala b/core/src/test/scala/akka/persistence/r2dbc/state/DurableStateUpdateWithChangeEventStoreSpec.scala index c7578c3d..40e1d2a5 100644 --- a/core/src/test/scala/akka/persistence/r2dbc/state/DurableStateUpdateWithChangeEventStoreSpec.scala +++ b/core/src/test/scala/akka/persistence/r2dbc/state/DurableStateUpdateWithChangeEventStoreSpec.scala @@ -4,8 +4,6 @@ package akka.persistence.r2dbc.state -import org.scalatest.concurrent.ScalaFutures.convertScalaFuture - import akka.actor.testkit.typed.scaladsl.LogCapturing import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit import akka.actor.typed.ActorSystem @@ -47,6 +45,8 @@ class DurableStateUpdateWithChangeEventStoreSpec private val tag = "TAG" "The R2DBC durable state store" should { + pendingIfMoreThanOneDataPartition() // FIXME + "save additional change event" in { val entityType = nextEntityType() val persistenceId = PersistenceId(entityType, "my-persistenceId").id @@ -176,37 +176,36 @@ class DurableStateUpdateWithChangeEventStoreSpec envelopes.size shouldBe 2 } - } - - "publish change event" in { - val entityType = nextEntityType() - val persistenceId = PersistenceId(entityType, "my-persistenceId").id + "publish change event" in { + val entityType = nextEntityType() + val persistenceId = PersistenceId(entityType, "my-persistenceId").id - val slice = persistenceExt.sliceForPersistenceId(persistenceId) - val topic = PubSub(system).eventTopic[String](entityType, slice) - val subscriberProbe = createTestProbe[EventEnvelope[String]]() - topic ! Topic.Subscribe(subscriberProbe.ref) + val slice = persistenceExt.sliceForPersistenceId(persistenceId) + val topic = PubSub(system).eventTopic[String](entityType, slice) + val subscriberProbe = createTestProbe[EventEnvelope[String]]() + topic ! 
Topic.Subscribe(subscriberProbe.ref) - val value1 = "Genuinely Collaborative" - val value2 = "Open to Feedback" + val value1 = "Genuinely Collaborative" + val value2 = "Open to Feedback" - store.upsertObject(persistenceId, 1L, value1, tag, s"Changed to $value1").futureValue - store.upsertObject(persistenceId, 2L, value2, tag, s"Changed to $value2").futureValue - store.deleteObject(persistenceId, 3L, "Deleted").futureValue + store.upsertObject(persistenceId, 1L, value1, tag, s"Changed to $value1").futureValue + store.upsertObject(persistenceId, 2L, value2, tag, s"Changed to $value2").futureValue + store.deleteObject(persistenceId, 3L, "Deleted").futureValue - val env1 = subscriberProbe.receiveMessage() - env1.event shouldBe s"Changed to $value1" - env1.sequenceNr shouldBe 1L - env1.tags shouldBe Set(tag) - env1.source shouldBe EnvelopeOrigin.SourcePubSub + val env1 = subscriberProbe.receiveMessage() + env1.event shouldBe s"Changed to $value1" + env1.sequenceNr shouldBe 1L + env1.tags shouldBe Set(tag) + env1.source shouldBe EnvelopeOrigin.SourcePubSub - val env2 = subscriberProbe.receiveMessage() - env2.event shouldBe s"Changed to $value2" - env2.sequenceNr shouldBe 2L + val env2 = subscriberProbe.receiveMessage() + env2.event shouldBe s"Changed to $value2" + env2.sequenceNr shouldBe 2L - val env3 = subscriberProbe.receiveMessage() - env3.event shouldBe s"Deleted" - env3.sequenceNr shouldBe 3L + val env3 = subscriberProbe.receiveMessage() + env3.event shouldBe s"Deleted" + env3.sequenceNr shouldBe 3L + } } } diff --git a/ddl-scripts/create_tables_postgres_0-1.sql b/ddl-scripts/create_tables_postgres_0-1.sql new file mode 100644 index 00000000..7daaab47 --- /dev/null +++ b/ddl-scripts/create_tables_postgres_0-1.sql @@ -0,0 +1,90 @@ +-- tables for data partition 0 and 1 + +CREATE TABLE IF NOT EXISTS event_journal_0( + slice INT NOT NULL, + entity_type VARCHAR(255) NOT NULL, + persistence_id VARCHAR(255) NOT NULL, + seq_nr BIGINT NOT NULL, + db_timestamp timestamp with time zone NOT NULL, + + event_ser_id INTEGER NOT NULL, + event_ser_manifest VARCHAR(255) NOT NULL, + event_payload BYTEA NOT NULL, + + deleted BOOLEAN DEFAULT FALSE NOT NULL, + writer VARCHAR(255) NOT NULL, + adapter_manifest VARCHAR(255), + tags TEXT ARRAY, + + meta_ser_id INTEGER, + meta_ser_manifest VARCHAR(255), + meta_payload BYTEA, + + PRIMARY KEY(persistence_id, seq_nr) +); + +CREATE TABLE IF NOT EXISTS event_journal_1( + slice INT NOT NULL, + entity_type VARCHAR(255) NOT NULL, + persistence_id VARCHAR(255) NOT NULL, + seq_nr BIGINT NOT NULL, + db_timestamp timestamp with time zone NOT NULL, + + event_ser_id INTEGER NOT NULL, + event_ser_manifest VARCHAR(255) NOT NULL, + event_payload BYTEA NOT NULL, + + deleted BOOLEAN DEFAULT FALSE NOT NULL, + writer VARCHAR(255) NOT NULL, + adapter_manifest VARCHAR(255), + tags TEXT ARRAY, + + meta_ser_id INTEGER, + meta_ser_manifest VARCHAR(255), + meta_payload BYTEA, + + PRIMARY KEY(persistence_id, seq_nr) +); + +-- `event_journal_slice_idx` is only needed if the slice based queries are used +CREATE INDEX IF NOT EXISTS event_journal_0_slice_idx ON event_journal_0(slice, entity_type, db_timestamp, seq_nr); +CREATE INDEX IF NOT EXISTS event_journal_1_slice_idx ON event_journal_1(slice, entity_type, db_timestamp, seq_nr); + +CREATE TABLE IF NOT EXISTS snapshot( + slice INT NOT NULL, + entity_type VARCHAR(255) NOT NULL, + persistence_id VARCHAR(255) NOT NULL, + seq_nr BIGINT NOT NULL, + db_timestamp timestamp with time zone, + write_timestamp BIGINT NOT NULL, + ser_id INTEGER NOT NULL, + 
ser_manifest VARCHAR(255) NOT NULL, + snapshot BYTEA NOT NULL, + tags TEXT ARRAY, + meta_ser_id INTEGER, + meta_ser_manifest VARCHAR(255), + meta_payload BYTEA, + + PRIMARY KEY(persistence_id) +); + +-- `snapshot_slice_idx` is only needed if the slice based queries are used together with snapshot as starting point +CREATE INDEX IF NOT EXISTS snapshot_slice_idx ON snapshot(slice, entity_type, db_timestamp); + +CREATE TABLE IF NOT EXISTS durable_state ( + slice INT NOT NULL, + entity_type VARCHAR(255) NOT NULL, + persistence_id VARCHAR(255) NOT NULL, + revision BIGINT NOT NULL, + db_timestamp timestamp with time zone NOT NULL, + + state_ser_id INTEGER NOT NULL, + state_ser_manifest VARCHAR(255), + state_payload BYTEA NOT NULL, + tags TEXT ARRAY, + + PRIMARY KEY(persistence_id, revision) +); + +-- `durable_state_slice_idx` is only needed if the slice based queries are used +CREATE INDEX IF NOT EXISTS durable_state_slice_idx ON durable_state(slice, entity_type, db_timestamp, revision); diff --git a/ddl-scripts/create_tables_postgres_2-3.sql b/ddl-scripts/create_tables_postgres_2-3.sql new file mode 100644 index 00000000..a8e807a3 --- /dev/null +++ b/ddl-scripts/create_tables_postgres_2-3.sql @@ -0,0 +1,90 @@ +-- tables for data partition 2 and 3 + +CREATE TABLE IF NOT EXISTS event_journal_2( + slice INT NOT NULL, + entity_type VARCHAR(255) NOT NULL, + persistence_id VARCHAR(255) NOT NULL, + seq_nr BIGINT NOT NULL, + db_timestamp timestamp with time zone NOT NULL, + + event_ser_id INTEGER NOT NULL, + event_ser_manifest VARCHAR(255) NOT NULL, + event_payload BYTEA NOT NULL, + + deleted BOOLEAN DEFAULT FALSE NOT NULL, + writer VARCHAR(255) NOT NULL, + adapter_manifest VARCHAR(255), + tags TEXT ARRAY, + + meta_ser_id INTEGER, + meta_ser_manifest VARCHAR(255), + meta_payload BYTEA, + + PRIMARY KEY(persistence_id, seq_nr) +); + +CREATE TABLE IF NOT EXISTS event_journal_3( + slice INT NOT NULL, + entity_type VARCHAR(255) NOT NULL, + persistence_id VARCHAR(255) NOT NULL, + seq_nr BIGINT NOT NULL, + db_timestamp timestamp with time zone NOT NULL, + + event_ser_id INTEGER NOT NULL, + event_ser_manifest VARCHAR(255) NOT NULL, + event_payload BYTEA NOT NULL, + + deleted BOOLEAN DEFAULT FALSE NOT NULL, + writer VARCHAR(255) NOT NULL, + adapter_manifest VARCHAR(255), + tags TEXT ARRAY, + + meta_ser_id INTEGER, + meta_ser_manifest VARCHAR(255), + meta_payload BYTEA, + + PRIMARY KEY(persistence_id, seq_nr) +); + +-- `event_journal_slice_idx` is only needed if the slice based queries are used +CREATE INDEX IF NOT EXISTS event_journal_2_slice_idx ON event_journal_2(slice, entity_type, db_timestamp, seq_nr); +CREATE INDEX IF NOT EXISTS event_journal_3_slice_idx ON event_journal_3(slice, entity_type, db_timestamp, seq_nr); + +CREATE TABLE IF NOT EXISTS snapshot( + slice INT NOT NULL, + entity_type VARCHAR(255) NOT NULL, + persistence_id VARCHAR(255) NOT NULL, + seq_nr BIGINT NOT NULL, + db_timestamp timestamp with time zone, + write_timestamp BIGINT NOT NULL, + ser_id INTEGER NOT NULL, + ser_manifest VARCHAR(255) NOT NULL, + snapshot BYTEA NOT NULL, + tags TEXT ARRAY, + meta_ser_id INTEGER, + meta_ser_manifest VARCHAR(255), + meta_payload BYTEA, + + PRIMARY KEY(persistence_id) +); + +-- `snapshot_slice_idx` is only needed if the slice based queries are used together with snapshot as starting point +CREATE INDEX IF NOT EXISTS snapshot_slice_idx ON snapshot(slice, entity_type, db_timestamp); + +CREATE TABLE IF NOT EXISTS durable_state ( + slice INT NOT NULL, + entity_type VARCHAR(255) NOT NULL, + persistence_id 
VARCHAR(255) NOT NULL, + revision BIGINT NOT NULL, + db_timestamp timestamp with time zone NOT NULL, + + state_ser_id INTEGER NOT NULL, + state_ser_manifest VARCHAR(255), + state_payload BYTEA NOT NULL, + tags TEXT ARRAY, + + PRIMARY KEY(persistence_id, revision) +); + +-- `durable_state_slice_idx` is only needed if the slice based queries are used +CREATE INDEX IF NOT EXISTS durable_state_slice_idx ON durable_state(slice, entity_type, db_timestamp, revision); diff --git a/docker/docker-compose-postgres-2.yml b/docker/docker-compose-postgres-2.yml new file mode 100644 index 00000000..35369902 --- /dev/null +++ b/docker/docker-compose-postgres-2.yml @@ -0,0 +1,30 @@ +version: '2.2' +services: + postgres-db-0: + image: postgres:latest + container_name: postgres-db-0 + ports: + - 5432:5432 + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + healthcheck: + test: ['CMD', 'pg_isready', "-q", "-d", "postgres", "-U", "postgres"] + interval: 5s + retries: 5 + start_period: 5s + timeout: 5s + postgres-db-1: + image: postgres:latest + container_name: postgres-db-1 + ports: + - 5433:5432 + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + healthcheck: + test: [ 'CMD', 'pg_isready', "-q", "-d", "postgres", "-U", "postgres" ] + interval: 5s + retries: 5 + start_period: 5s + timeout: 5s diff --git a/migration/src/main/scala/akka/persistence/r2dbc/migration/MigrationTool.scala b/migration/src/main/scala/akka/persistence/r2dbc/migration/MigrationTool.scala index d3a96344..ef2c2f43 100644 --- a/migration/src/main/scala/akka/persistence/r2dbc/migration/MigrationTool.scala +++ b/migration/src/main/scala/akka/persistence/r2dbc/migration/MigrationTool.scala @@ -6,7 +6,6 @@ package akka.persistence.r2dbc.migration import java.time.Instant -import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.concurrent.duration._ import scala.util.Failure @@ -30,7 +29,6 @@ import akka.persistence.query.PersistenceQuery import akka.persistence.query.scaladsl.CurrentEventsByPersistenceIdQuery import akka.persistence.query.scaladsl.CurrentPersistenceIdsQuery import akka.persistence.query.scaladsl.ReadJournal -import akka.persistence.r2dbc.ConnectionFactoryProvider import akka.persistence.r2dbc.R2dbcSettings import akka.persistence.r2dbc.internal.SerializedEventMetadata import akka.persistence.r2dbc.internal.SnapshotDao @@ -49,6 +47,7 @@ import io.r2dbc.spi.R2dbcDataIntegrityViolationException import org.slf4j.LoggerFactory import akka.persistence.r2dbc.internal.DurableStateDao.SerializedStateRow +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider import akka.persistence.state.DurableStateStoreRegistry import akka.persistence.state.scaladsl.DurableStateStore import akka.persistence.state.scaladsl.GetObjectResult @@ -126,16 +125,19 @@ class MigrationTool(system: ActorSystem[_]) { private val serialization: Serialization = SerializationExtension(system) - private val targetConnectionFactory = ConnectionFactoryProvider(system) - .connectionFactoryFor(targetPluginId + ".connection-factory") + private val targetExecutorProvider = new R2dbcExecutorProvider( + targetR2dbcSettings, + targetPluginId + ".connection-factory", + LoggerFactory.getLogger(getClass)) + private val targetJournalDao = - targetR2dbcSettings.connectionFactorySettings.dialect.createJournalDao(targetR2dbcSettings, targetConnectionFactory) + targetR2dbcSettings.connectionFactorySettings.dialect.createJournalDao(targetR2dbcSettings, targetExecutorProvider) private val targetSnapshotDao = 
targetR2dbcSettings.connectionFactorySettings.dialect - .createSnapshotDao(targetR2dbcSettings, targetConnectionFactory) + .createSnapshotDao(targetR2dbcSettings, targetExecutorProvider) private val targetDurableStateDao = targetR2dbcSettings.connectionFactorySettings.dialect - .createDurableStateDao(targetR2dbcSettings, targetConnectionFactory) + .createDurableStateDao(targetR2dbcSettings, targetExecutorProvider) private val targetBatch = migrationConfig.getInt("target.batch") @@ -154,11 +156,7 @@ class MigrationTool(system: ActorSystem[_]) { if (targetR2dbcSettings.dialectName == "h2") { log.error("Migrating to H2 using the migration tool not currently supported") } - private[r2dbc] val migrationDao = - new MigrationToolDao( - targetConnectionFactory, - targetR2dbcSettings.logDbCallsExceeding, - targetR2dbcSettings.connectionFactorySettings.poolSettings.closeCallsExceeding) + private[r2dbc] val migrationDao = new MigrationToolDao(targetExecutorProvider) private lazy val createProgressTable: Future[Done] = migrationDao.createProgressTable() diff --git a/migration/src/main/scala/akka/persistence/r2dbc/migration/MigrationToolDao.scala b/migration/src/main/scala/akka/persistence/r2dbc/migration/MigrationToolDao.scala index 0c08e18a..0db12e59 100644 --- a/migration/src/main/scala/akka/persistence/r2dbc/migration/MigrationToolDao.scala +++ b/migration/src/main/scala/akka/persistence/r2dbc/migration/MigrationToolDao.scala @@ -6,19 +6,18 @@ package akka.persistence.r2dbc.migration import scala.concurrent.ExecutionContext import scala.concurrent.Future -import scala.concurrent.duration.FiniteDuration import akka.Done import akka.actor.typed.ActorSystem import akka.annotation.InternalApi import akka.dispatch.ExecutionContexts import akka.persistence.r2dbc.internal.Sql.InterpolationWithAdapter -import akka.persistence.r2dbc.internal.R2dbcExecutor import akka.persistence.r2dbc.internal.codec.IdentityAdapter import akka.persistence.r2dbc.internal.codec.QueryAdapter -import io.r2dbc.spi.ConnectionFactory import org.slf4j.LoggerFactory +import akka.persistence.r2dbc.internal.R2dbcExecutorProvider + /** * INTERNAL API */ @@ -35,14 +34,13 @@ import org.slf4j.LoggerFactory /** * INTERNAL API */ -@InternalApi private[r2dbc] class MigrationToolDao( - connectionFactory: ConnectionFactory, - logDbCallsExceeding: FiniteDuration, - closeCallsExceeding: Option[FiniteDuration])(implicit ec: ExecutionContext, system: ActorSystem[_]) { +@InternalApi private[r2dbc] class MigrationToolDao(executorProvider: R2dbcExecutorProvider)(implicit + ec: ExecutionContext, + system: ActorSystem[_]) { import MigrationToolDao._ - private implicit val queryAdapter: QueryAdapter = IdentityAdapter - private val r2dbcExecutor = - new R2dbcExecutor(connectionFactory, log, logDbCallsExceeding, closeCallsExceeding)(ec, system) + private implicit val queryAdapter: QueryAdapter = IdentityAdapter + // progress always in data partition 0 + private val r2dbcExecutor = executorProvider.executorFor(slice = 0) def createProgressTable(): Future[Done] = { r2dbcExecutor.executeDdl("create migration progress table") { connection =>