diff --git a/README.md b/README.md index 853fecd7c17..b86c1d2600b 100644 --- a/README.md +++ b/README.md @@ -80,11 +80,11 @@ The IBM FHIR Server is modular and extensible. The following tables provide an o |fhir-audit|Audit-related interfaces and implementations including 1) a No-op AuditLogService and 2) an AuditLogService that writes audit events to Apache Kafka in the Cloud Auditing Data Federation (CADF) JSON format|false| |fhir-search|Utilities for working with the FHIR search specification|false| |fhir-persistence|Interfaces, helpers, and tests for implementing a persistence layer or persistence interceptors for the IBM FHIR Server|false| -|fhir-persistence-schema|Classes for deploying and updating the IBM FHIR Server relational database schema|false| |fhir-persistence-jdbc|A relational FHIRPersistence implementation that uses JDBC to store and query FHIR resources|false| |fhir-persistence-scout|A scale out persistence layer to store and query FHIR resources *experimental* |false| |fhir-persistence-cos|Decorates the fhir-persistence-jdbc module with the ability to offload payload storage to IBM Cloud Object Storage *experimental* |false| |fhir-persistence-cassandra|Decorates the fhir-persistence-jdbc module with the ability to offload payload storage to Cassandra *experimental* |false| +|fhir-persistence-blob|Decorates the fhir-persistence-jdbc module with the ability to offload payload storage to Azure Blob *experimental* |false| |fhir-provider|JAX-RS Providers for FHIR XML and JSON and related patch formats|false| |fhir-server|JAX-RS resources and related classes for implementing the FHIR REST API and extended operations|false| |fhir-server-webapp|A web application that packages the fhir-server with a set of built-in extended operations|false| @@ -135,6 +135,8 @@ The IBM FHIR Server is modular and extensible. 
The following tables provide an o |fhir-install|Packaging and installation scripts for creating the fhir-distribution zip and the corresponding IBM FHIR Server Docker image|false| |fhir-benchmark|Java Microbenchmark Harness (JMH) tests for measuring read/write/validation performance for the IBM FHIR Server and the HL7 FHIR Java Reference Implementation|false| |fhir-bucket|Scans cloud object storage buckets and uploads data using the FHIR REST API|false| +|fhir-persistence-schema|Classes for deploying and updating the IBM FHIR Server relational database schema|false| +|fhir-persistence-cassandra-app|CLI utility application supporting payload storage to Cassandra *experimental* |false| ### Contributing to the IBM FHIR Server The IBM FHIR Server is under active development. To help develop the server, clone or download the project and build it using Maven. diff --git a/build/security/asoc.sh b/build/security/asoc.sh index fc52fe1cb9f..b98339f76c9 100644 --- a/build/security/asoc.sh +++ b/build/security/asoc.sh @@ -16,6 +16,7 @@ mkdir -p ${WORKSPACE}/build/security/logs/output/ find ${WORKSPACE} -iname 'fhir-*.jar' -not -iname 'fhir*-tests.jar' -not -iname 'fhir*-test-*.jar' -not -iname 'fhir-persistence-schema-*-cli.jar' -not -iname 'fhir-swagger-generator-*-cli.jar' \ -not -iname 'fhir-examples-*.jar' -not -name 'fhir-bulkdata-webapp-*-client.jar' -not -iname 'fhir*-ig-*.jar' -not -iname 'fhir-bucket-*-cli.jar' -not -path '*/target/fhir-server-webapp-*' \ -not -iname 'fhir-operation-cqf-*-shaded.jar' -not -iname 'fhir-operation-cpg-*-shaded.jar' -not -iname 'fhir-term-graph-loader-*-cli.jar' \ + -not -path '*/target/fhir-persistence-cassandra-app*' \ -not -path '*/target/fhir-bulkdata*' -exec cp -f {} ${WORKSPACE}/build/security/logs/tmp \; cd ${WORKSPACE}/build/security/logs/ diff --git a/fhir-bucket/README.md b/fhir-bucket/README.md index 472c61ca1a5..7c6d5658e1a 100644 --- a/fhir-bucket/README.md +++ b/fhir-bucket/README.md @@ -229,6 +229,14 @@ java -jar "${JAR}" 
\ This tracking database can be shared with the instance used by FHIR, but for proper performance testing it should be on a separate host. The standard schema for the tables is FHIRBUCKET. +Schema creation does not have to be performed separately. If you wish to create the schema as part of the main program run, specify + +``` + --bootstrap-schema +``` + +on the command line. The schema creation and update process is idempotent. Changes are only applied when required, and concurrency is managed correctly to ensure only one instance makes changes if multiple instances of fhir-bucket are run simultaneously. + The preferred approach is to use the new `--bootstrap-schema` option when running the main workload, in which case the `--create-schema` activity isn't required. @@ -247,6 +255,7 @@ java -jar "${JAR}" \ --db-properties db2.properties \ --cos-properties cos.properties \ --fhir-properties fhir.properties \ + --bootstrap-schema \ --bucket example-bucket \ --tenant-name example-tenant \ --file-type NDJSON \ @@ -275,11 +284,73 @@ To run using PostgreSQL, change the relevant arguments to: ... ``` +The `--immediate-local` option can be used to load files from a local file-system without the need for a FHIRBUCKET database or connection to COS: + +``` +java \ + -Djava.util.logging.config.file=logging.properties \ + -jar "${JAR}" \ + --db-type postgresql \ + --db-properties db.properties \ + --fhir-properties fhir.properties \ + --tenant-name your-tenant-name \ + --file-type JSON \ + --max-concurrent-fhir-requests 0 \ + --max-concurrent-json-files 0 \ + --max-concurrent-ndjson-files 0 \ + --connection-pool-size 10 \ + --immediate-local \ + --scan-local-dir /path/to/synthea/data +``` + +Because --immediate-local does not use a FHIRBUCKET database, there is no tracking of the logical ids generated by the IBM FHIR Server. This means it is not possible to run the interop workload against this data. 
+ +To track the logical ids, you can provide a FHIRBUCKET database configuration along with the --scan-local-dir argument, but do not specify --immediate-local: + +``` +java \ + -Djava.util.logging.config.file=logging.properties \ + -jar "${JAR}" \ + --db-type postgresql \ + --db-properties db.properties \ + --fhir-properties fhir.properties \ + --tenant-name your-tenant-name \ + --file-type JSON \ + --max-concurrent-fhir-requests 0 \ + --max-concurrent-json-files 0 \ + --max-concurrent-ndjson-files 0 \ + --connection-pool-size 10 \ + --scan-local-dir /path/to/synthea/data +``` + +Once the directory scanning is complete, the program can be terminated. To load the files now registered in the FHIRBUCKET database, run the following: + +``` +java \ + -Djava.util.logging.config.file=logging.properties \ + -jar "${JAR}" \ + --db-type postgresql \ + --db-properties db.properties \ + --fhir-properties fhir.properties \ + --tenant-name your-tenant-name \ + --file-type JSON \ + --no-scan \ + --max-concurrent-fhir-requests 40 \ + --max-concurrent-json-files 10 \ + --max-concurrent-ndjson-files 0 \ + --connection-pool-size 40 \ + --scan-local-dir /path/to/synthea/data +``` + +Note that the --scan-local-dir [path-name] option must still be provided. + + | Property Name | Description | | -------------------- | -----| | `--bootstrap-schema` | Creates/updates the schema as an initial step before starting the main workload. Simplifies cloud deployment scenarios by avoiding the need for a separate job. Ensures only one instance will try to update the schema at a time. Do not specify `--create-schema` when using this option. | | `--db-type type` | where `type` is one of: db2, derby, postgresql. Specifies the type of database to use for the FHIRBUCKET tracking data. | | `--create-schema` | Creates a new or updates an existing database schema. 
The program will exit after the schema operations have completed.| +| `--bootstrap-schema` | Creates a new or updates an existing database schema. The program will not exit after the schema operations have completed.| | `--schema-name` | The custom schema used for FHIRBUCKET tracking data. The default is `FHIRBUCKET`.| | `--tenant-name fhir-tenant-name` | The IBM FHIR Server tenant name| | `--cos-properties properties-file` | Connection properties file for COS | diff --git a/fhir-bulkdata-webapp/src/main/java/com/ibm/fhir/bulkdata/jbatch/load/ChunkWriter.java b/fhir-bulkdata-webapp/src/main/java/com/ibm/fhir/bulkdata/jbatch/load/ChunkWriter.java index 5d28a037a15..76adf4b5f4e 100644 --- a/fhir-bulkdata-webapp/src/main/java/com/ibm/fhir/bulkdata/jbatch/load/ChunkWriter.java +++ b/fhir-bulkdata-webapp/src/main/java/com/ibm/fhir/bulkdata/jbatch/load/ChunkWriter.java @@ -54,6 +54,7 @@ import com.ibm.fhir.operation.bulkdata.model.type.OperationFields; import com.ibm.fhir.operation.bulkdata.model.type.StorageType; import com.ibm.fhir.persistence.FHIRPersistence; +import com.ibm.fhir.persistence.FHIRPersistenceSupport; import com.ibm.fhir.persistence.InteractionStatus; import com.ibm.fhir.persistence.SingleResourceResult; import com.ibm.fhir.persistence.context.FHIRPersistenceContext; @@ -61,7 +62,6 @@ import com.ibm.fhir.persistence.context.FHIRPersistenceEvent; import com.ibm.fhir.persistence.helper.FHIRPersistenceHelper; import com.ibm.fhir.persistence.helper.FHIRTransactionHelper; -import com.ibm.fhir.persistence.payload.PayloadPersistenceHelper; import com.ibm.fhir.persistence.util.FHIRPersistenceUtil; import com.ibm.fhir.validation.exception.FHIRValidationException; @@ -305,7 +305,7 @@ public OperationOutcome conditionalFingerprintUpdate(ImportTransientUserData chu SingleResourceResult oldResourceResult = persistence.read(context, resource.getClass(), logicalId); Resource oldResource = oldResourceResult.getResource(); - final com.ibm.fhir.model.type.Instant 
lastUpdated = PayloadPersistenceHelper.getCurrentInstant(); + final com.ibm.fhir.model.type.Instant lastUpdated = FHIRPersistenceSupport.getCurrentInstant(); final int newVersionNumber = oldResource != null && oldResource.getMeta() != null && oldResource.getMeta().getVersionId() != null ? Integer.parseInt(oldResource.getMeta().getVersionId().getValue()) + 1 : 1; resource = FHIRPersistenceUtil.copyAndSetResourceMetaFields(resource, logicalId, newVersionNumber, lastUpdated); diff --git a/fhir-client/src/main/java/com/ibm/fhir/client/impl/FHIRClientImpl.java b/fhir-client/src/main/java/com/ibm/fhir/client/impl/FHIRClientImpl.java index 0fbea6f0c5b..125c72dc62d 100644 --- a/fhir-client/src/main/java/com/ibm/fhir/client/impl/FHIRClientImpl.java +++ b/fhir-client/src/main/java/com/ibm/fhir/client/impl/FHIRClientImpl.java @@ -888,7 +888,8 @@ private void initProperties(Properties props) throws Exception { setHostnameVerificationEnabled(Boolean.parseBoolean(getProperty(PROPNAME_HOSTNAME_VERIFICATION_ENABLED, "true"))); - setHttpTimeout(Integer.parseUnsignedInt(getProperty(PROPNAME_HTTP_TIMEOUT, "60000"))); + // Use a default that's longer than the default Liberty transaction timeout + setHttpTimeout(Integer.parseUnsignedInt(getProperty(PROPNAME_HTTP_TIMEOUT, "130000"))); setTenantId(getProperty(PROPNAME_TENANT_ID, null)); } catch (Throwable t) { diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/derby/DerbyPropertyAdapter.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/derby/DerbyPropertyAdapter.java index 91c262759c4..f01c8bdaf01 100644 --- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/derby/DerbyPropertyAdapter.java +++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/derby/DerbyPropertyAdapter.java @@ -1,5 +1,5 @@ /* - * (C) Copyright IBM Corp. 2019, 2020 + * (C) Copyright IBM Corp. 
2019, 2022 * * SPDX-License-Identifier: Apache-2.0 */ @@ -53,4 +53,13 @@ public void setAutoCreate(boolean create) { public boolean isAutoCreate() { return "Y".equals(this.properties.getProperty(CREATE_KEY)); } + + @Override + public String getDefaultSchema() { + String result = super.getDefaultSchema(); + if (result == null) { + result = "APP"; + } + return result; + } } diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/pool/DatabaseSupport.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/pool/DatabaseSupport.java new file mode 100644 index 00000000000..a968b234a25 --- /dev/null +++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/pool/DatabaseSupport.java @@ -0,0 +1,189 @@ +/* + * (C) Copyright IBM Corp. 2022 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.database.utils.pool; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Properties; + +import com.ibm.fhir.database.utils.api.IConnectionProvider; +import com.ibm.fhir.database.utils.api.IDatabaseAdapter; +import com.ibm.fhir.database.utils.api.IDatabaseTranslator; +import com.ibm.fhir.database.utils.api.ITransaction; +import com.ibm.fhir.database.utils.api.ITransactionProvider; +import com.ibm.fhir.database.utils.common.JdbcConnectionProvider; +import com.ibm.fhir.database.utils.db2.Db2Adapter; +import com.ibm.fhir.database.utils.db2.Db2PropertyAdapter; +import com.ibm.fhir.database.utils.db2.Db2Translator; +import com.ibm.fhir.database.utils.derby.DerbyAdapter; +import com.ibm.fhir.database.utils.derby.DerbyPropertyAdapter; +import com.ibm.fhir.database.utils.derby.DerbyTranslator; +import com.ibm.fhir.database.utils.model.DbType; +import com.ibm.fhir.database.utils.postgres.PostgresAdapter; +import com.ibm.fhir.database.utils.postgres.PostgresPropertyAdapter; +import com.ibm.fhir.database.utils.postgres.PostgresTranslator; +import com.ibm.fhir.database.utils.transaction.SimpleTransactionProvider; 
+ +/** + * Support class for managing connections to a database for utility apps + */ +public class DatabaseSupport implements IConnectionProvider, ITransactionProvider { + private static final int DEFAULT_CONNECTION_POOL_SIZE = 10; + private final Properties dbProperties; + private final DbType dbType; + + // The translator for the configured database type + private IDatabaseTranslator translator; + + // The adapter configured for the type of database we're using + private IDatabaseAdapter adapter; + + // Connection pool used to work alongside the transaction provider + private PoolConnectionProvider connectionPool; + + // Simple transaction service for use outside of JEE + private ITransactionProvider transactionProvider; + + private int connectionPoolSize = DEFAULT_CONNECTION_POOL_SIZE; + + /** + * Public constructor + * @param dbProperties + * @param dbType + */ + public DatabaseSupport(Properties dbProperties, DbType dbType) { + this.dbProperties = dbProperties; + this.dbType = dbType; + } + + /** + * Build the database configuration from the configured properties + */ + public void init() { + switch (this.dbType) { + case DB2: + configureForDb2(); + break; + case DERBY: + configureForDerby(); + break; + case POSTGRESQL: + configureForPostgresql(); + break; + default: + throw new IllegalStateException("Unsupported database type: " + this.dbType); + } + } + + /** + * Set up the connection pool and transaction provider for connecting to a Derby + * database + */ + private void configureForDerby() { + DerbyPropertyAdapter propertyAdapter = new DerbyPropertyAdapter(dbProperties); + + this.translator = new DerbyTranslator(); + IConnectionProvider cp = new JdbcConnectionProvider(this.translator, propertyAdapter); + this.connectionPool = new PoolConnectionProvider(cp, connectionPoolSize); + this.connectionPool.setCloseOnAnyError(); + this.adapter = new DerbyAdapter(connectionPool); + this.transactionProvider = new SimpleTransactionProvider(connectionPool); + } + + 
/** + * Set up the connection pool and transaction provider for connecting to a DB2 + * database + */ + private void configureForDb2() { + + this.translator = new Db2Translator(); + try { + Class.forName(translator.getDriverClassName()); + } catch (ClassNotFoundException e) { + throw new IllegalStateException(e); + } + + Db2PropertyAdapter propertyAdapter = new Db2PropertyAdapter(dbProperties); + IConnectionProvider cp = new JdbcConnectionProvider(translator, propertyAdapter); + this.connectionPool = new PoolConnectionProvider(cp, connectionPoolSize); + this.adapter = new Db2Adapter(connectionPool); + this.transactionProvider = new SimpleTransactionProvider(connectionPool); + } + + /** + * Set up the connection pool and transaction provider for connecting to a DB2 + * database + */ + private void configureForPostgresql() { + this.translator = new PostgresTranslator(); + try { + Class.forName(translator.getDriverClassName()); + } catch (ClassNotFoundException e) { + throw new IllegalStateException(e); + } + + PostgresPropertyAdapter propertyAdapter = new PostgresPropertyAdapter(dbProperties); + IConnectionProvider cp = new JdbcConnectionProvider(translator, propertyAdapter); + this.connectionPool = new PoolConnectionProvider(cp, connectionPoolSize); + this.adapter = new PostgresAdapter(connectionPool); + this.transactionProvider = new SimpleTransactionProvider(connectionPool); + } + + /** + * Get the configured database adapter + * @return + */ + public IDatabaseAdapter getDatabaseAdapter() { + if (this.adapter == null) { + throw new IllegalStateException("DatabaseSupport not initialized"); + } + return this.adapter; + } + + /** + * Get the IDatabaseTranslator for the configured database type + * @return + */ + public IDatabaseTranslator getTranslator() { + if (this.translator == null) { + throw new IllegalStateException("DatabaseSupport not initialized"); + } + return this.translator; + } + + @Override + public Connection getConnection() throws SQLException { + if 
(this.connectionPool == null) { + throw new IllegalStateException("DatabaseSupport not initialized"); + } + + return this.connectionPool.getConnection(); + } + + @Override + public ITransaction getTransaction() { + if (this.transactionProvider == null) { + throw new IllegalStateException("DatabaseSupport not initialized"); + } + return this.transactionProvider.getTransaction(); + } + + @Override + public void commitTransaction() throws SQLException { + connectionPool.commitTransaction(); + } + + @Override + public void rollbackTransaction() throws SQLException { + connectionPool.rollbackTransaction(); + } + + @Override + public void describe(String prefix, StringBuilder cfg, String key) { + connectionPool.describe(prefix, cfg, key); + } +} diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/Select.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/Select.java index f4fad77035d..6397c514cc7 100644 --- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/Select.java +++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/Select.java @@ -120,6 +120,18 @@ public void addColumn(String source, String name) { public void addColumn(String source, String name, Alias alias) { selectList.addColumn(source, name, alias); } + + /** + * Add a value column to the select list, for example: + * addColumn("5", alias("RESOURCE_TYPE_ID")) can be used for: + * SELECT foo, 5 AS RESOURCE_TYPE_ID + * FROM ... 
+ * @param columnValue + * @param alias + */ + public void addColumn(String columnValue, Alias alias) { + selectList.addColumn(columnValue, alias); + } /** * Add a table item to the from-clause diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/SelectAdapter.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/SelectAdapter.java index 2e62d2b58f7..45299e677e5 100644 --- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/SelectAdapter.java +++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/SelectAdapter.java @@ -61,6 +61,19 @@ public SelectAdapter addColumn(String source, String name, Alias alias) { this.select.addColumn(source, name, alias); return this; } + + /** + * Add a column value with a given alias. Can be used to add literals in + * the select list + * @param source + * @param name + * @param alias + * @return + */ + public SelectAdapter addColumn(String columnValue, Alias alias) { + this.select.addColumn(columnValue, alias); + return this; + } /** * Create a from clause for this select statement diff --git a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/SelectList.java b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/SelectList.java index 75354a6cc6a..c039e72cecf 100644 --- a/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/SelectList.java +++ b/fhir-database-utils/src/main/java/com/ibm/fhir/database/utils/query/SelectList.java @@ -35,6 +35,18 @@ public SelectItemColumn addColumn(String source, String name, Alias alias) { return column; } + /** + * Add a value column (used to add literals) + * @param columnValue + * @param alias + * @return + */ + public SelectItemColumn addColumn(String columnValue, Alias alias) { + SelectItemColumn column = new SelectItemColumn(null, columnValue, alias); + items.add(column); + return column; + } + public SelectItemSubQuery addSubQuery(Select subQuery, Alias alias) { 
SelectItemSubQuery column = new SelectItemSubQuery(subQuery, alias); items.add(column); diff --git a/fhir-parent/pom.xml b/fhir-parent/pom.xml index 544a71d00e5..3519fb3c3e4 100644 --- a/fhir-parent/pom.xml +++ b/fhir-parent/pom.xml @@ -78,6 +78,9 @@ ../fhir-persistence ../fhir-persistence-schema ../fhir-persistence-jdbc + ../fhir-persistence-cassandra + ../fhir-persistence-cassandra-app + ../fhir-persistence-blob ../fhir-provider ../cql ../operation/fhir-operation-test diff --git a/fhir-persistence-blob/.gitignore b/fhir-persistence-blob/.gitignore new file mode 100644 index 00000000000..b1b060f0c5d --- /dev/null +++ b/fhir-persistence-blob/.gitignore @@ -0,0 +1,2 @@ +/derby/ +/derby.log diff --git a/fhir-persistence-blob/pom.xml b/fhir-persistence-blob/pom.xml new file mode 100644 index 00000000000..4ad9ad290a0 --- /dev/null +++ b/fhir-persistence-blob/pom.xml @@ -0,0 +1,112 @@ + + 4.0.0 + + fhir-persistence-blob + + + com.ibm.fhir + fhir-parent + 4.11.0-SNAPSHOT + ../fhir-parent + + + + + ${project.groupId} + fhir-persistence + ${project.version} + + + ${project.groupId} + fhir-examples + test + + + ${project.groupId} + fhir-persistence-schema + ${project.version} + provided + + + ${project.groupId} + fhir-persistence-jdbc + ${project.version} + + + ${project.groupId} + fhir-config + ${project.version} + + + ${project.groupId} + fhir-model + ${project.version} + + + ${project.groupId} + fhir-path + ${project.version} + + + ${project.groupId} + fhir-validation + ${project.version} + test + + + ${project.groupId} + fhir-validation + ${project.version} + test-jar + test + + + ${project.groupId} + fhir-model + ${project.version} + test-jar + test + + + ${project.groupId} + fhir-persistence + ${project.version} + test-jar + test + + + ${project.groupId} + fhir-search + ${project.version} + + + jakarta.transaction + jakarta.transaction-api + provided + + + com.azure + azure-storage-blob + + + com.azure + azure-core-http-okhttp + + + com.azure + azure-core + + + 
org.testng + testng + test + + + org.skyscreamer + jsonassert + test + + + + diff --git a/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobContainerManager.java b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobContainerManager.java new file mode 100644 index 00000000000..a13efdbbcaf --- /dev/null +++ b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobContainerManager.java @@ -0,0 +1,206 @@ +/* + * (C) Copyright IBM Corp. 2021 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.blob; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Logger; + +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobContainerClientBuilder; +import com.ibm.fhir.config.FHIRConfigHelper; +import com.ibm.fhir.config.FHIRConfiguration; +import com.ibm.fhir.config.FHIRRequestContext; +import com.ibm.fhir.config.PropertyGroup; +import com.ibm.fhir.core.lifecycle.EventCallback; +import com.ibm.fhir.core.lifecycle.EventManager; + +/** + * Singleton to abstract and manage Azure Blob containers. + * Each tenant/datasource gets its own container. + * + * TODO: investigate if BlobContainerClient should be long-lived and shared + * by multiple threads and how to configure its http client library and executor + * when running inside Liberty. 
+ */ +public class BlobContainerManager implements EventCallback { + private static final Logger logger = Logger.getLogger(BlobContainerManager.class.getName()); + + // Map holding one container client instance per tenant/datasource + private final ConcurrentHashMap connectionMap = new ConcurrentHashMap<>(); + + // so we can reject future requests when shut down + private volatile boolean running = true; + + /** + * Singleton pattern safe construction + */ + private static class Helper { + private static BlobContainerManager INSTANCE = new BlobContainerManager(); + } + + /** + * Private constructor + */ + private BlobContainerManager() { + // receive server lifecycle events + EventManager.register(this); + } + + /** + * Get the singleton instance of this class + * @return + */ + public static BlobContainerManager getInstance() { + return Helper.INSTANCE; + } + + /** + * Get the (shared, thread-safe) connection object representing the Azure + * Blob connection for the current tenant/datasource + * (see {@link FHIRRequestContext}). + * @return + */ + public static BlobManagedContainer getSessionForTenantDatasource() { + return BlobContainerManager.getInstance().getOrCreateSession(); + } + + /** + * Get or create the Azure Blob connection for the current + * tenant/datasource. + * @return a BlobContainerClient for the current tenant/datasource + */ + private BlobManagedContainer getOrCreateSession() { + if (!running) { + throw new IllegalStateException("BlobConnectionManager is shut down"); + } + + // Connections can be tenant-specific, so find out what tenant we're associated with and use its persistence + // configuration to obtain the appropriate CqlSession instance (shared by multiple threads). 
+ final String tenantId = FHIRRequestContext.get().getTenantId(); + final String dsId = FHIRRequestContext.get().getDataStoreId(); + TenantDatasourceKey key = new TenantDatasourceKey(tenantId, dsId); + + // Get the session for this tenant/datasource, or create a new one if needed + BlobContainerClient client = connectionMap.computeIfAbsent(key, BlobContainerManager::newConnection); + String dsPropertyName = FHIRConfiguration.PROPERTY_PERSISTENCE_PAYLOAD + "/" + key.getDatasourceId(); + BlobPropertyGroupAdapter properties = getPropertyGroupAdapter(dsPropertyName); + + return new BlobManagedContainer(client, properties); + } + + /** + * Build a new CqlSession object for the tenant/datasource tuple described by key. + * @param key + * @return + */ + private static BlobContainerClient newConnection(TenantDatasourceKey key) { + String dsPropertyName = FHIRConfiguration.PROPERTY_PERSISTENCE_PAYLOAD + "/" + key.getDatasourceId(); + BlobPropertyGroupAdapter adapter = getPropertyGroupAdapter(dsPropertyName); + return makeConnection(key, adapter); + } + + /** + * Check if payload persistence is configured for the current tenant/datasource + * @return + */ + public static boolean isPayloadPersistenceConfigured() { + final String tenantId = FHIRRequestContext.get().getTenantId(); + final String dsId = FHIRRequestContext.get().getDataStoreId(); + TenantDatasourceKey key = new TenantDatasourceKey(tenantId, dsId); + String dsPropertyName = FHIRConfiguration.PROPERTY_PERSISTENCE_PAYLOAD + "/" + key.getDatasourceId(); + PropertyGroup dsPG = FHIRConfigHelper.getPropertyGroup(dsPropertyName); + return dsPG != null; + } + + /** + * Get a CassandraPropertyGroupAdapter bound to the property group described by + * the given dsPropertyName path (in fhir-server-config.json). 
+ * @param dsPropertyName + * @return + */ + public static BlobPropertyGroupAdapter getPropertyGroupAdapter(String dsPropertyName) { + + PropertyGroup dsPG = FHIRConfigHelper.getPropertyGroup(dsPropertyName); + if (dsPG == null) { + throw new IllegalStateException("Could not locate configuration property group: " + dsPropertyName); + } + + try { + // Get the datasource type (should be "azure.blob" in this case). + String type = dsPG.getStringProperty("type", null); + if (type == null) { + throw new IllegalStateException("Could not locate 'type' property within datasource property group: " + dsPropertyName); + } + + // Confirm that this is an Azure Blob datasource configuration element + if (!"azure.blob".equals(type)) { + throw new IllegalStateException("Unsupported 'type' property value within datasource property group: " + type); + } + + // Get the connection properties + PropertyGroup connectionProps = dsPG.getPropertyGroup("connectionProperties"); + if (connectionProps == null) { + throw new IllegalStateException("Could not locate 'connectionProperties' property group within datasource property group: " + dsPropertyName); + } + + return new BlobPropertyGroupAdapter(connectionProps); + } + catch (Exception x) { + throw new IllegalStateException(x); + } + } + + /** + * Get the BlobContainerClient for the Azure blob endpoint using the configuration + * described by the {@link BlobPropertyGroupAdapter}. 
+ * @param key + * @param adapter + * @return + */ + private static BlobContainerClient makeConnection(TenantDatasourceKey key, BlobPropertyGroupAdapter adapter) { + BlobContainerClient blobContainerClient = new BlobContainerClientBuilder() + .connectionString(adapter.getConnectionString()) + .containerName(adapter.getTenantContainer()) + .buildClient(); + + return blobContainerClient; + } + + /** + * Get rid of any sessions we're holding on to + */ + private void closeAllSessions() { + // prevent anyone asking for a session + this.running = false; + connectionMap.clear(); + } + + /** + * Close any sessions that are currently open to permit a clean exit + * TODO what shutdown do we need to do + */ + public static void shutdown() { + logger.info("Shutting down DatasourceSessions"); + getInstance().closeAllSessions(); + logger.info("DatasourceSessions shutdown complete"); + } + + @Override + public void serverReady() { + // NOP + } + + @Override + public void startShutdown() { + this.running = false; + } + + @Override + public void finalShutdown() { + connectionMap.clear(); + } +} \ No newline at end of file diff --git a/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobDeletePayload.java b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobDeletePayload.java new file mode 100644 index 00000000000..4e4787bf266 --- /dev/null +++ b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobDeletePayload.java @@ -0,0 +1,66 @@ +/* + * (C) Copyright IBM Corp. 
2021
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.fhir.persistence.blob;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.azure.core.http.rest.Response;
+import com.azure.storage.blob.BlobClient;
+import com.azure.storage.blob.models.DeleteSnapshotsOptionType;
+import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
+
+/**
+ * DAO command to delete the configured Azure blob (holding a FHIR payload object)
+ */
+public class BlobDeletePayload {
+    // Fix: log under this class; was BlobReadPayload.class, which mislabeled log records
+    private static final Logger logger = Logger.getLogger(BlobDeletePayload.class.getName());
+
+    // Identity of the payload blob to delete
+    final int resourceTypeId;
+    final String logicalId;
+    final int version;
+    final String resourcePayloadKey;
+
+    /**
+     * Public constructor
+     * @param resourceTypeId database-assigned id of the resource type
+     * @param logicalId logical id of the resource
+     * @param version resource version number
+     * @param resourcePayloadKey unique key identifying this payload record
+     */
+    public BlobDeletePayload(int resourceTypeId, String logicalId, int version, String resourcePayloadKey) {
+        this.resourceTypeId = resourceTypeId;
+        this.logicalId = logicalId;
+        this.version = version;
+        this.resourcePayloadKey = resourcePayloadKey;
+    }
+
+    /**
+     * Execute this command against the given client
+     * @param client the managed container in which the blob is stored
+     * @return the service response from the blob delete call
+     * @throws FHIRPersistenceException if the delete operation fails
+     */
+    public Response<Void> run(BlobManagedContainer client) throws FHIRPersistenceException {
+        // Blob path convention: resourceTypeId/logicalId/version/resourcePayloadKey
+        final String blobPath = resourceTypeId + "/" + logicalId + "/" + version + "/" + resourcePayloadKey;
+        BlobClient bc = client.getClient().getBlobClient(blobPath);
+
+        try {
+            // Delete snapshots too so no orphaned snapshot data remains
+            return bc.deleteWithResponse(DeleteSnapshotsOptionType.INCLUDE, null,
+                client.getProperties().getTimeout(), null);
+        } catch (Exception x) {
+            // Fix: record and chain the causing exception so the root failure is not lost
+            logger.log(Level.SEVERE, "Error deleting resource payload for resourceTypeId=" + resourceTypeId
+                + ", logicalId=" + logicalId + ", version=" + version
+                + ", resourcePayloadKey=" + resourcePayloadKey, x);
+            throw new FHIRPersistenceException("Error deleting resource payload", x);
+        }
+    }
+}
\ No newline at end of file
diff --git a/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobManagedContainer.java b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobManagedContainer.java
new file mode 100644
index 00000000000..3bcf3f04f83
--- /dev/null
+++ b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobManagedContainer.java
@@ -0,0 +1,43 @@
+/*
+ * (C) Copyright IBM Corp. 2021
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.fhir.persistence.blob;
+
+import com.azure.storage.blob.BlobContainerClient;
+
+/**
+ * A blob container managed by the BlobContainerManager
+ */
+public class BlobManagedContainer {
+    private final BlobContainerClient client;
+    private final BlobPropertyGroupAdapter properties;
+
+    /**
+     * Package protected constructor
+     * @param client
+     * @param properties
+     */
+    protected BlobManagedContainer(BlobContainerClient client, BlobPropertyGroupAdapter properties) {
+        this.client = client;
+        this.properties = properties;
+    }
+
+    /**
+     * Get the client
+     * @return
+     */
+    public BlobContainerClient getClient() {
+        return this.client;
+    }
+
+    /**
+     * Get the properties
+     * @return
+     */
+    public BlobPropertyGroupAdapter getProperties() {
+        return this.properties;
+    }
+}
\ No newline at end of file
diff --git a/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobPropertyGroupAdapter.java b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobPropertyGroupAdapter.java
new file mode 100644
index 00000000000..8284e242a2a
--- /dev/null
+++ b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobPropertyGroupAdapter.java
@@ -0,0 +1,78 @@
+/*
+ * (C) Copyright IBM Corp. 
2021
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.fhir.persistence.blob;
+
+import java.time.Duration;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.ibm.fhir.config.PropertyGroup;
+
+/**
+ * Provides a facade on top of the fhir-server-config PropertyGroup structure
+ * to simplify access to configuration elements we need for connecting to
+ * the Azure Blob API
+ */
+public class BlobPropertyGroupAdapter {
+    private static final Logger logger = Logger.getLogger(BlobPropertyGroupAdapter.class.getName());
+
+    // The property name for describing the user-specific connection details including credentials
+    public static final String PROP_CONNECTION_STRING = "connectionString";
+
+    // The property name for the Azure Blob container name to use for the tenant
+    public static final String PROP_TENANT_CONTAINER = "tenantContainer";
+
+    // The property name for the Azure Blob command timeout in seconds
+    public static final String PROP_TIMEOUT_SECS = "timeoutSecs";
+
+    // The property group we are wrapping
+    private final PropertyGroup propertyGroup;
+
+    public BlobPropertyGroupAdapter(PropertyGroup pg) {
+        this.propertyGroup = pg;
+    }
+
+    /**
+     * Get the configured value for the Azure Blob connectionString
+     * @return
+     */
+    public String getConnectionString() {
+        try {
+            return propertyGroup.getStringProperty(PROP_CONNECTION_STRING);
+        } catch (Exception x) {
+            logger.log(Level.SEVERE, PROP_CONNECTION_STRING, x);
+            throw new IllegalArgumentException("Property group not configured " + PROP_CONNECTION_STRING);
+        }
+    }
+
+    /**
+     * Get the configured value for the Azure Blob container to use for the tenant.
+     * @return
+     */
+    public String getTenantContainer() {
+        try {
+            return propertyGroup.getStringProperty(PROP_TENANT_CONTAINER);
+        } catch (Exception x) {
+            logger.log(Level.SEVERE, PROP_TENANT_CONTAINER, x);
+            throw new IllegalArgumentException("Property group not configured " + PROP_TENANT_CONTAINER);
+        }
+    }
+
+    /**
+     * Get the {@link Duration} representing the configured timeout
+     * @return
+     */
+    public Duration getTimeout() {
+        try {
+            int timeoutSeconds = propertyGroup.getIntProperty(PROP_TIMEOUT_SECS, 120);
+            return Duration.ofSeconds(timeoutSeconds);
+        } catch (Exception x) {
+            logger.log(Level.SEVERE, PROP_TIMEOUT_SECS, x);
+            throw new IllegalArgumentException("Bad property " + PROP_TIMEOUT_SECS);
+        }
+    }
+}
\ No newline at end of file
diff --git a/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobReadPayload.java b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobReadPayload.java
new file mode 100644
index 00000000000..5d7617f0101
--- /dev/null
+++ b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobReadPayload.java
@@ -0,0 +1,80 @@
+/*
+ * (C) Copyright IBM Corp. 
2021
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.fhir.persistence.blob;
+
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.azure.core.util.BinaryData;
+import com.azure.storage.blob.BlobClient;
+import com.ibm.fhir.model.resource.Resource;
+import com.ibm.fhir.persistence.FHIRPersistenceSupport;
+import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
+import com.ibm.fhir.persistence.util.InputOutputByteStream;
+
+/**
+ * DAO command to read a FHIR resource payload from the configured Azure blob
+ */
+public class BlobReadPayload {
+    private static final Logger logger = Logger.getLogger(BlobReadPayload.class.getName());
+    private final int resourceTypeId;
+    private final String logicalId;
+    private final int version;
+    private final String resourcePayloadKey;
+    private final List<String> elements;
+    private final boolean compress;
+
+    /**
+     * Public constructor
+     * @param resourceTypeId database-assigned id of the resource type
+     * @param logicalId logical id of the resource
+     * @param version resource version number
+     * @param resourcePayloadKey unique key identifying this payload record
+     * @param elements subset of elements to parse, or null for the whole resource
+     * @param compress true if the stored payload is compressed
+     */
+    public BlobReadPayload(int resourceTypeId, String logicalId, int version, String resourcePayloadKey, List<String> elements, boolean compress) {
+        this.resourceTypeId = resourceTypeId;
+        this.logicalId = logicalId;
+        this.version = version;
+        this.resourcePayloadKey = resourcePayloadKey;
+        this.elements = elements;
+        this.compress = compress;
+    }
+
+    /**
+     * Execute this command against the given client
+     * @param resourceType the class of the resource being read
+     * @param client the managed container in which the blob is stored
+     * @return the parsed resource
+     * @throws FHIRPersistenceException if the download or parse fails
+     */
+    public <T extends Resource> T run(Class<T> resourceType, BlobManagedContainer client) throws FHIRPersistenceException {
+        T result;
+        // Blob path convention: resourceTypeId/logicalId/version/resourcePayloadKey
+        final String blobPath = resourceTypeId + "/" + logicalId + "/" + version + "/" + resourcePayloadKey;
+        BlobClient bc = client.getClient().getBlobClient(blobPath);
+
+        try {
+            BinaryData binaryData = bc.downloadContent();
+            InputOutputByteStream readStream = new InputOutputByteStream(binaryData.toBytes(), 0);
+            result = FHIRPersistenceSupport.parse(resourceType, readStream.inputStream(), this.elements, this.compress);
+        } catch (Exception x) {
+            // Fix: record and chain the causing exception so the root failure is not lost
+            logger.log(Level.SEVERE, "Error reading resource, resourceTypeId=" + resourceTypeId
+                + ", logicalId=" + logicalId + ", version=" + version
+                + ", resourcePayloadKey=" + resourcePayloadKey, x);
+            throw new FHIRPersistenceException("Error reading resource payload", x);
+        }
+
+        return result;
+    }
+}
\ No newline at end of file
diff --git a/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobStorePayload.java b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobStorePayload.java
new file mode 100644
index 00000000000..4349825811e
--- /dev/null
+++ b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/BlobStorePayload.java
@@ -0,0 +1,64 @@
+/*
+ * (C) Copyright IBM Corp. 
2021
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.fhir.persistence.blob;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.azure.core.http.rest.Response;
+import com.azure.core.util.BinaryData;
+import com.azure.storage.blob.BlobClient;
+import com.azure.storage.blob.models.BlockBlobItem;
+import com.azure.storage.blob.options.BlobParallelUploadOptions;
+import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
+import com.ibm.fhir.persistence.util.InputOutputByteStream;
+
+/**
+ * DAO command to store the configured payload in the Azure blob
+ */
+public class BlobStorePayload {
+    private static final Logger logger = Logger.getLogger(BlobStorePayload.class.getName());
+
+    // Identity of the payload blob to store
+    final int resourceTypeId;
+    final String logicalId;
+    final int version;
+    final String resourcePayloadKey;
+    final InputOutputByteStream ioStream;
+
+    /**
+     * Public constructor
+     * @param resourceTypeId database-assigned id of the resource type
+     * @param logicalId logical id of the resource
+     * @param version resource version number
+     * @param resourcePayloadKey unique key identifying this payload record
+     * @param ioStream buffer holding the rendered payload bytes
+     */
+    public BlobStorePayload(int resourceTypeId, String logicalId, int version, String resourcePayloadKey, InputOutputByteStream ioStream) {
+        this.resourceTypeId = resourceTypeId;
+        this.logicalId = logicalId;
+        this.version = version;
+        this.resourcePayloadKey = resourcePayloadKey;
+        this.ioStream = ioStream;
+    }
+
+    /**
+     * Execute this command against the given client
+     * @param client the managed container in which to store the blob
+     * @return the service response from the upload call
+     * @throws FHIRPersistenceException if the upload fails
+     */
+    public Response<BlockBlobItem> run(BlobManagedContainer client) throws FHIRPersistenceException {
+        // Blob path convention: resourceTypeId/logicalId/version/resourcePayloadKey
+        final String blobPath = resourceTypeId + "/" + logicalId + "/" + version + "/" + resourcePayloadKey;
+        BlobClient bc = client.getClient().getBlobClient(blobPath);
+
+        BlobParallelUploadOptions uploadOptions = new BlobParallelUploadOptions(BinaryData.fromBytes(ioStream.getRawBuffer()));
+        try {
+            return bc.uploadWithResponse(uploadOptions, client.getProperties().getTimeout(), null);
+        } catch (Exception x) {
+            // Fix: wrap failures in the declared FHIRPersistenceException for consistency
+            // with the other blob DAO commands, chaining the cause so it is not lost
+            logger.log(Level.SEVERE, "Error storing resource payload for resourceTypeId=" + resourceTypeId
+                + ", logicalId=" + logicalId + ", version=" + version
+                + ", resourcePayloadKey=" + resourcePayloadKey, x);
+            throw new FHIRPersistenceException("Error storing resource payload", x);
+        }
+    }
+}
\ No newline at end of file
diff --git a/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/FHIRPayloadPersistenceBlobImpl.java b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/FHIRPayloadPersistenceBlobImpl.java
new file mode 100644
index 00000000000..0dad0b4f6c4
--- /dev/null
+++ b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/FHIRPayloadPersistenceBlobImpl.java
@@ -0,0 +1,95 @@
+/*
+ * (C) Copyright IBM Corp. 2021, 2022
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.fhir.persistence.blob;
+
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Future;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import com.ibm.fhir.config.FHIRConfiguration;
+import com.ibm.fhir.config.FHIRRequestContext;
+import com.ibm.fhir.model.resource.Resource;
+import com.ibm.fhir.persistence.FHIRPersistenceSupport;
+import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
+import com.ibm.fhir.persistence.payload.FHIRPayloadPersistence;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResult;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResult.Status;
+import com.ibm.fhir.persistence.util.InputOutputByteStream;
+
+
+/**
+ * Implementation to store and retrieve FHIR payload data using Azure Blob. 
+ */ +public class FHIRPayloadPersistenceBlobImpl implements FHIRPayloadPersistence { + private static final Logger logger = Logger.getLogger(FHIRPayloadPersistenceBlobImpl.class.getName()); + private static final long NANOS = 1000000000L; + public static final boolean PAYLOAD_COMPRESSED = true; + + /** + * Public constructor + */ + public FHIRPayloadPersistenceBlobImpl() { + } + + /** + * Get a tenant-specific connection + */ + protected BlobManagedContainer getBlobContainerClient() { + return BlobContainerManager.getSessionForTenantDatasource(); + } + + @Override + public PayloadPersistenceResponse storePayload(String resourceType, int resourceTypeId, String logicalId, int version, String resourcePayloadKey, Resource resource) + throws FHIRPersistenceException { + Future result; + try { + // We leave compression to the storage platform, making it easier for other clients + // to read the resource data if they want + InputOutputByteStream ioStream = FHIRPersistenceSupport.render(resource, !PAYLOAD_COMPRESSED); + BlobStorePayload spl = new BlobStorePayload(resourceTypeId, logicalId, version, resourcePayloadKey, ioStream); + spl.run(getBlobContainerClient()); + + // TODO actual async behavior + result = CompletableFuture.completedFuture(new PayloadPersistenceResult(Status.OK)); + } catch (Exception x) { + logger.log(Level.SEVERE, "storePayload failed for resource '" + + resourceType + "[" + resourceTypeId + "]/" + logicalId + "/_history/" + version + "'", x); + result = CompletableFuture.completedFuture(new PayloadPersistenceResult(Status.FAILED)); + } + return new PayloadPersistenceResponse(resourcePayloadKey, resourceType, resourceTypeId, logicalId, version, result); + } + + @Override + public T readResource(Class resourceType, String rowResourceTypeName, int resourceTypeId, String logicalId, + int version, String resourcePayloadKey, List elements) throws FHIRPersistenceException { + + logger.fine(() -> "readResource " + rowResourceTypeName + "[" + resourceTypeId 
+ "]/" + logicalId + "/_history/" + version); + final BlobPropertyGroupAdapter config = getConfigAdapter(); + BlobReadPayload cmd = new BlobReadPayload(resourceTypeId, logicalId, version, resourcePayloadKey, elements, !PAYLOAD_COMPRESSED); + return cmd.run(resourceType, getBlobContainerClient()); + } + + @Override + public void deletePayload(String resourceType, int resourceTypeId, String logicalId, Integer version, String resourcePayloadKey) throws FHIRPersistenceException { + logger.fine(() -> "deletePayload " + resourceType + "[" + resourceTypeId + "]/" + logicalId + "/_history/" + version); + BlobDeletePayload cmd = new BlobDeletePayload(resourceTypeId, logicalId, version, resourcePayloadKey); + cmd.run(getBlobContainerClient()); + } + + /** + * Get the config adapter for the current tenant/datasource + * @return + */ + private BlobPropertyGroupAdapter getConfigAdapter() { + final String dsId = FHIRRequestContext.get().getDataStoreId(); + final String dsPropertyName = FHIRConfiguration.PROPERTY_PERSISTENCE_PAYLOAD + "/" + dsId; + return BlobContainerManager.getPropertyGroupAdapter(dsPropertyName); + } +} \ No newline at end of file diff --git a/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/FHIRPersistenceJDBCBlobFactory.java b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/FHIRPersistenceJDBCBlobFactory.java new file mode 100644 index 00000000000..9cb2610f3d1 --- /dev/null +++ b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/FHIRPersistenceJDBCBlobFactory.java @@ -0,0 +1,31 @@ +/* + * (C) Copyright IBM Corp. 
2021 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.blob; + +import com.ibm.fhir.persistence.exception.FHIRPersistenceException; +import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCFactory; +import com.ibm.fhir.persistence.payload.FHIRPayloadPersistence; + +/** + * Factory for decorating the JDBC persistence layer with a payload + * persistence implementation using Azure Blob. + */ +public class FHIRPersistenceJDBCBlobFactory extends FHIRPersistenceJDBCFactory { + + @Override + public FHIRPayloadPersistence getPayloadPersistence() throws FHIRPersistenceException { + + // If payload persistence is configured for this tenant, provide + // the impl otherwise null + FHIRPayloadPersistence result = null; + if (BlobContainerManager.isPayloadPersistenceConfigured()) { + result = new FHIRPayloadPersistenceBlobImpl(); + } + + return result; + } +} \ No newline at end of file diff --git a/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/TenantDatasourceKey.java b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/TenantDatasourceKey.java new file mode 100644 index 00000000000..8f72a5294d2 --- /dev/null +++ b/fhir-persistence-blob/src/main/java/com/ibm/fhir/persistence/blob/TenantDatasourceKey.java @@ -0,0 +1,58 @@ +/* + * (C) Copyright IBM Corp. 
2020, 2021
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package com.ibm.fhir.persistence.blob;
+
+import java.util.Objects;
+
+/**
+ * Key used to represent a tenant/datasource pair
+ */
+public class TenantDatasourceKey {
+
+    // Id representing the tenant
+    private final String tenantId;
+
+    // Id representing the datasource for a given tenant
+    private final String datasourceId;
+
+    public TenantDatasourceKey(String tenantId, String datasourceId) {
+        this.tenantId = tenantId;
+        this.datasourceId = datasourceId;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(tenantId, datasourceId);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof TenantDatasourceKey) {
+            TenantDatasourceKey that = (TenantDatasourceKey)obj;
+            return this.tenantId.equals(that.tenantId)
+                && this.datasourceId.equals(that.datasourceId);
+        }
+        // Fix: the Object.equals contract requires returning false for
+        // non-matching types (and null); throwing breaks map/set usage
+        return false;
+    }
+
+    /**
+     * @return the tenantId
+     */
+    public String getTenantId() {
+        return tenantId;
+    }
+
+
+    /**
+     * @return the datasourceId
+     */
+    public String getDatasourceId() {
+        return datasourceId;
+    }
+}
diff --git a/fhir-persistence-cassandra-app/pom.xml b/fhir-persistence-cassandra-app/pom.xml
new file mode 100644
index 00000000000..55f337db130
--- /dev/null
+++ b/fhir-persistence-cassandra-app/pom.xml
@@ -0,0 +1,170 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>fhir-persistence-cassandra-app</artifactId>
+
+    <parent>
+        <groupId>com.ibm.fhir</groupId>
+        <artifactId>fhir-parent</artifactId>
+        <version>4.11.0-SNAPSHOT</version>
+        <relativePath>../fhir-parent</relativePath>
+    </parent>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>fhir-persistence</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>fhir-examples</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>fhir-config</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>fhir-model</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>fhir-persistence-cassandra</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>fhir-persistence-jdbc</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>fhir-path</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>${project.groupId}</groupId>
+            <artifactId>fhir-validation</artifactId>
+            <version>
${project.version} + test + + + ${project.groupId} + fhir-validation + ${project.version} + test-jar + test + + + ${project.groupId} + fhir-model + ${project.version} + test-jar + test + + + ${project.groupId} + fhir-persistence + ${project.version} + test-jar + test + + + ${project.groupId} + fhir-search + ${project.version} + + + com.datastax.oss + java-driver-core + + + com.datastax.oss + java-driver-query-builder + + + + org.apache.derby + derby + true + + + org.apache.derby + derbytools + true + + + com.ibm.db2 + jcc + true + + + org.postgresql + postgresql + true + + + + jakarta.transaction + jakarta.transaction-api + provided + + + org.testng + testng + test + + + org.skyscreamer + jsonassert + test + + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + package + + shade + + + true + cli + + + com.ibm.fhir.persistence.cassandra.app.Main + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + + + + + + + + diff --git a/fhir-persistence-cassandra-app/src/main/java/com/ibm/fhir/persistence/cassandra/app/Main.java b/fhir-persistence-cassandra-app/src/main/java/com/ibm/fhir/persistence/cassandra/app/Main.java new file mode 100644 index 00000000000..b532d11c6cc --- /dev/null +++ b/fhir-persistence-cassandra-app/src/main/java/com/ibm/fhir/persistence/cassandra/app/Main.java @@ -0,0 +1,250 @@ +/* + * (C) Copyright IBM Corp. 
2021, 2022 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.app; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Properties; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.ibm.fhir.config.FHIRConfigHelper; +import com.ibm.fhir.config.FHIRConfiguration; +import com.ibm.fhir.config.FHIRRequestContext; +import com.ibm.fhir.config.PropertyGroup; +import com.ibm.fhir.config.PropertyGroup.PropertyEntry; +import com.ibm.fhir.database.utils.model.DbType; +import com.ibm.fhir.persistence.cassandra.cql.CreateSchema; +import com.ibm.fhir.persistence.cassandra.cql.DatasourceSessions; +import com.ibm.fhir.persistence.cassandra.reconcile.PayloadReconciliation; + +/** + * Admin operations for the IBM FHIR Server payload offload support + * in Cassandra. + *
+ *  1. Bootstrap the offload keyspace and tables in Cassandra 
+ *  2. Run the reconciliation process to look for orphaned payload records
+ * 
+ */ +public class Main { + private static final Logger logger = Logger.getLogger(Main.class.getName()); + + private int replicationFactor = 0; + private String fhirConfigDir; + private String tenantId; + private String dsIdArg = "default"; + + // Perform the tenant bootstrap process + private boolean bootstrap; + + // Run the reconciliation process + private boolean reconcile; + + // When set, reconciliation reports inconsistencies but does not delete anything + private boolean dryRun; + + // Properties for the RDBMS connection + private Properties dbProperties = new Properties(); + + // The type of database we are talking to + private DbType dbType; + + /** + * Create the Cassandra keyspace and tables used to persist FHIR payloads + * @throws Exception + */ + private void bootstrapTenant() throws Exception { + FHIRRequestContext.set(new FHIRRequestContext(tenantId, "default")); + PropertyGroup pg = FHIRConfigHelper.getPropertyGroup(FHIRConfiguration.PROPERTY_PERSISTENCE_PAYLOAD); + if (pg != null) { + // Bootstrap for each of the datastore ids configured for the tenant + for (PropertyEntry pe: pg.getProperties()) { + final String dsId = pe.getName(); + PropertyGroup datasourceEntry = pg.getPropertyGroup(dsId); + if (datasourceEntry != null) { + final String datasourceType = datasourceEntry.getStringProperty("type"); + if ("cassandra".equalsIgnoreCase(datasourceType)) { + PropertyGroup connectionProperties = datasourceEntry.getPropertyGroup("connectionProperties"); + if (connectionProperties != null) { + final String tenantKeyspace = connectionProperties.getStringProperty("tenantKeyspace", tenantId); + bootstrapTenantDatasource(tenantKeyspace, dsId, replicationFactor); + } else { + throw new IllegalStateException("Missing connectionProperties in payload datasource: " + tenantId + "/" + dsId); + } + } + } else { + // configuration file is broken + throw new IllegalStateException("Datasource property is not a PropertyGroup: " + dsId); + } + } + } else { + throw new 
IllegalArgumentException("Tenant not found: " + tenantId); + } + } + + /** + * Create the Cassandra keyspace and tables for the given tenant and datastore ids. + * @param tenantId + * @param dsId + * @param replicationFactor + */ + private void bootstrapTenantDatasource(String tenantKeyspace, String dsId, int replicationFactor) { + CqlSession session = DatasourceSessions.getSessionForBootstrap(tenantId, dsId); + CreateSchema createSchema = new CreateSchema(tenantKeyspace); + createSchema.createKeyspace(session, "SimpleStrategy", replicationFactor); + createSchema.run(session); + } + + /** + * Parse the command-line arguments + * @param args + */ + private void parseArgs(String[] args) throws Exception { + + for (int i=0; i= 1, not " + replicationFactor); + } + } else { + throw new IllegalArgumentException("Missing value for --replication-factor"); + } + break; + case "--tenant-id": + if (++i < args.length) { + this.tenantId = args[i]; + } else { + throw new IllegalArgumentException("Missing value for --tenant-id"); + } + break; + case "--ds-id": + if (++i < args.length) { + this.dsIdArg = args[i]; + } else { + throw new IllegalArgumentException("Missing value for --ds-id"); + } + break; + case "--db-type": + if (++i < args.length) { + this.dbType = DbType.from(args[i]); + } else { + throw new IllegalArgumentException("Missing value for --db-type"); + } + break; + case "--db-properties": + if (++i < args.length) { + readDatabaseProperties(args[i]); + } else { + throw new IllegalArgumentException("Missing value for --db-properties"); + } + break; + case "--bootstrap": + this.bootstrap = true; + break; + case "--reconcile": + this.reconcile = true; + break; + case "--dry-run": + this.dryRun = true; + break; + } + } + + if (bootstrap) { + if (this.dryRun) { + // Just in case someone thinks dry-run applies to bootstrap + throw new IllegalArgumentException("--dry-run can only be used with --reconcile"); + } + if (replicationFactor == 0) { + logger.warning("Using default 
replication factor of 1 - not suitable for production"); + this.replicationFactor = 1; + } + } + } + + /** + * Run the reconciliation process which scans the payload store and checks that + * each record is supported by the correct meta-data in the RDBMS. + * @throws Exception + */ + private void runReconciliation() throws Exception { + PayloadReconciliation process = new PayloadReconciliation(this.tenantId, this.dsIdArg, dbProperties, dbType, dryRun); + process.run(); + } + + /** + * Read the properties file specified by path into the dbProperties + * @param path + * @throws Exception + */ + private void readDatabaseProperties(String path) throws IOException { + try (InputStream in = new FileInputStream(path)) { + this.dbProperties.load(in); + } catch (IOException x) { + logger.log(Level.SEVERE, "Error loading properties from database properties file '" + path + "'", x); + throw x; + } + } + + /** + * Perform the action requested on the command line + * @throws Exception + */ + private void process() throws Exception { + if (fhirConfigDir == null || fhirConfigDir.isEmpty()) { + throw new IllegalArgumentException("File config dir not configured"); + } + + File f = new File(this.fhirConfigDir); + if (!f.exists() || !f.isDirectory()) { + throw new IllegalArgumentException("--fhir-config-dir does not point to a directory: '" + this.fhirConfigDir + "'"); + } + + FHIRConfiguration.setConfigHome(fhirConfigDir); + if (this.bootstrap) { + bootstrapTenant(); + } + if (this.reconcile) { + runReconciliation(); + } + } + + /** + * Main entry point + * @param args + */ + public static void main(String[] args) { + + Main m = new Main(); + + try { + m.parseArgs(args); + try { + m.process(); + } finally { + DatasourceSessions.shutdown(); + } + } catch (Exception x) { + logger.log(Level.SEVERE, "[FAILED]", x); + System.exit(1); + } + } +} diff --git a/fhir-persistence-cassandra/README.md b/fhir-persistence-cassandra/README.md new file mode 100644 index 00000000000..86925b00c37 --- 
/dev/null +++ b/fhir-persistence-cassandra/README.md @@ -0,0 +1,106 @@ +# FHIR payload offload in Cassandra + +Experimental feature to support the storage of the JSON resource payload outside of the RDBMS. + +There are two projects: + +1. fhir-persistence-cassandra: implements the FHIRPayloadPersistence API +2. fhir-persistence-cassandra-app: utility to create Cassandra keyspace and tables as well as a tool to reconcile contents of the Cassandra datastore with the RDBMS. + +## Limitations + +Only suitable for development work. Should not be used for production or any instances +where PHI is involved until the client-server connection can be encrypted and supports +authentication. + +## Packaging + +As this feature is still experimental, it currently isn't packaged with the main server. Users +wishing to work with the feature must deploy the fhir-persistence-cassandra-x.y.z-buildid.jar +to the Liberty server userlib directory along with the Cassandra client driver and its +dependencies. + +For development, add `fhir-persistence-cassandra` as a dependency to the fhir-server project `pom.xml` +file. + +## Configuration + +``` + "persistence": { + "factoryClassname": "com.ibm.fhir.persistence.cassandra.FHIRPersistenceJDBCCassandraFactory", + "datasources": { + "default": { + ... 
+        },
+    },
+    "payload": {
+        "default": {
+            "__comment": "Cassandra configuration for storing FHIR resource payload data",
+            "type": "cassandra",
+            "connectionProperties" : {
+                "contactPoints": [
+                    { "host": "a-cassandra-host", "port": 9042 }
+                ],
+                "localDatacenter": "datacenter1",
+                "tenantKeyspace": "keyspace_to_use",
+                "localCoreConnectionsPerHost": 1,
+                "localMaxConnectionsPerHost": 4,
+                "remoteCoreConnectionsPerHost": 1,
+                "remoteMaxConnectionsPerHost": 4
+            }
+        }
+    }
+```
+
+## Bootstrap
+
+To create the Cassandra keyspace and tables, run the following:
+
+```
+java -jar /path/to/fhir-persistence-cassandra-app-*-SNAPSHOT-cli.jar \
+  --fhir-config-dir /path/to/ibm/fhir/server/wlp/usr/servers/defaultServer --tenant-id [your-tenant-name] --bootstrap
+```
+
+The command will create a new keyspace for each of the datastores configured under the payload element in the tenant's
+fhir-server-config.json file. In most cases, there will be a single datastore called `default`.
+
+## Reconciliation
+
+Reconciliation is a process used to scan the Cassandra resource payload tables and verify that each record is associated
+with a parent record in the RDBMS. Discrepancies are flagged and can be deleted.
+
+To only identify discrepancies without deleting any records, include the `--dry-run` option:
+
+```
+java -jar /path/to/fhir-persistence-cassandra-app-*-SNAPSHOT-cli.jar \
+  --fhir-config-dir /path/to/ibm/fhir/server/wlp/usr/servers/defaultServer \
+  --db-type postgresql \
+  --db-properties fhiradmin.properties \
+  --tenant-id default \
+  --ds-id default \
+  --reconcile \
+  --dry-run
+```
+
+The `fhiradmin.properties` file is the same format as used by the fhir-persistence-schema CLI tool. 
For example +a connection to a local database used for development would look like this: + +``` +db.host=localhost +db.port=5432 +db.database=fhirdb +db.type=postgres +db.default.schema=fhirdata +user=fhiradmin +password=change-password +``` + +The current implementation does not support checkpointing so must be allowed to scan the entire dataset in one pass. + +## Running Cassandra in Docker + +Create a single node Cassandra container which can be used for development and testing of the payload persistence feature: + +``` +podman run --name fhircass1 -v ./data/fhircass1:/var/lib/cassandra:z -e CASSANDRA_CLUSTER_NAME=fhir -e CASSANDRA_DC=datacenter1 -e CASSANDRA_RACK=rack1 -e CASSANDRA_ENDPOINT_SNITCH=GossipingPropertyFileSnitch -d -p 9042:9042 cassandra:latest +``` diff --git a/fhir-persistence-cassandra/pom.xml b/fhir-persistence-cassandra/pom.xml index 5760029d95f..6f07005bd6e 100644 --- a/fhir-persistence-cassandra/pom.xml +++ b/fhir-persistence-cassandra/pom.xml @@ -26,12 +26,6 @@ fhir-examples test - - ${project.groupId} - fhir-persistence-schema - ${project.version} - provided - ${project.groupId} fhir-config @@ -108,5 +102,4 @@ test - - + \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/CassandraPropertyGroupAdapter.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/CassandraPropertyGroupAdapter.java index d87bd4b677e..502bd56417d 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/CassandraPropertyGroupAdapter.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/CassandraPropertyGroupAdapter.java @@ -1,5 +1,5 @@ /* - * (C) Copyright IBM Corp. 2021 + * (C) Copyright IBM Corp. 
2021, 2022 * * SPDX-License-Identifier: Apache-2.0 */ @@ -25,7 +25,8 @@ public class CassandraPropertyGroupAdapter { public static final String PROP_HOST = "host"; public static final String PROP_PORT = "port"; public static final String PROP_LOCAL_DATACENTER = "localDatacenter"; - + public static final String PROP_TENANT_KEYSPACE = "tenantKeyspace"; + public static final String PROP_COMPRESS = "compress"; // The property group we are wrapping private final PropertyGroup propertyGroup; @@ -76,4 +77,17 @@ public String getLocalDatacenter() { throw new IllegalArgumentException("Property group not configured " + PROP_LOCAL_DATACENTER); } } -} + + /** + * Get the configured value for the keyspace to use for the tenant. + * @return + */ + public String getTenantKeyspace() { + try { + return propertyGroup.getStringProperty(PROP_TENANT_KEYSPACE); + } catch (Exception x) { + logger.log(Level.SEVERE, PROP_TENANT_KEYSPACE, x); + throw new IllegalArgumentException("Property group not configured " + PROP_TENANT_KEYSPACE); + } + } +} \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/FHIRPersistenceJDBCCassandraFactory.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/FHIRPersistenceJDBCCassandraFactory.java index 3184f792305..101196a3d3f 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/FHIRPersistenceJDBCCassandraFactory.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/FHIRPersistenceJDBCCassandraFactory.java @@ -1,11 +1,12 @@ /* - * (C) Copyright IBM Corp. 2020, 2021 + * (C) Copyright IBM Corp. 
2020, 2022 * * SPDX-License-Identifier: Apache-2.0 */ package com.ibm.fhir.persistence.cassandra; +import com.ibm.fhir.persistence.cassandra.cql.DatasourceSessions; import com.ibm.fhir.persistence.cassandra.payload.FHIRPayloadPersistenceCassandraImpl; import com.ibm.fhir.persistence.exception.FHIRPersistenceException; import com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCFactory; @@ -18,8 +19,14 @@ public class FHIRPersistenceJDBCCassandraFactory extends FHIRPersistenceJDBCFact @Override public FHIRPayloadPersistence getPayloadPersistence() throws FHIRPersistenceException { - // Store the payload in Cassandra - // TODO use a real strategy - return new FHIRPayloadPersistenceCassandraImpl(FHIRPayloadPersistenceCassandraImpl.defaultPartitionStrategy()); - }; + + // If payload persistence is configured for this tenant, provide + // the impl otherwise null + FHIRPayloadPersistence result = null; + if (DatasourceSessions.isPayloadPersistenceConfigured()) { + result = new FHIRPayloadPersistenceCassandraImpl(); + } + + return result; + } } diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/app/Main.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/app/Main.java deleted file mode 100644 index 6d48e668c39..00000000000 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/app/Main.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * (C) Copyright IBM Corp. 
2021 - * - * SPDX-License-Identifier: Apache-2.0 - */ - -package com.ibm.fhir.persistence.cassandra.app; - -import java.util.logging.Level; -import java.util.logging.Logger; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.ibm.fhir.config.FHIRConfigHelper; -import com.ibm.fhir.config.FHIRConfiguration; -import com.ibm.fhir.config.FHIRRequestContext; -import com.ibm.fhir.config.PropertyGroup; -import com.ibm.fhir.config.PropertyGroup.PropertyEntry; -import com.ibm.fhir.persistence.cassandra.cql.CreateSchema; -import com.ibm.fhir.persistence.cassandra.cql.DatasourceSessions; - -/** - * Bootstrap all the Cassandra databases for the given tenant - */ -public class Main { - private static final Logger logger = Logger.getLogger(Main.class.getName()); - - private static void bootstrapTenant(String tenantId) throws Exception { - FHIRRequestContext.set(new FHIRRequestContext(tenantId, "default")); - PropertyGroup pg = FHIRConfigHelper.getPropertyGroup(FHIRConfiguration.PROPERTY_DATASOURCES); - if (pg != null) { - for (PropertyEntry pe: pg.getProperties()) { - final String dsId = pe.getName(); - PropertyGroup datasourceEntry = pg.getPropertyGroup(dsId); - if (datasourceEntry != null) { - bootstrapTenantDatasource(tenantId, dsId); - } else { - // configuration file is broken - throw new IllegalStateException("Datasource property is not a PropertyGroup: " + dsId); - } - } - } else { - throw new IllegalArgumentException("Tenant not found: " + tenantId); - } - } - - private static void bootstrapTenantDatasource(String tenantId, String dsId) { - CqlSession session = DatasourceSessions.getSessionForBootstrap(tenantId, dsId); - CreateSchema createSchema = new CreateSchema(tenantId); - createSchema.createKeyspace(session, "SimpleStrategy", 2); - createSchema.run(session); - } - - public static void main(String[] args) { - if (args.length < 2) { - throw new IllegalArgumentException("Usage: java -jar fhir-persistence-cassandra-cli.jar "); - } - - final String 
fhirConfigDir = args[0]; - final String tenantId = args[1]; - try { - FHIRConfiguration.setConfigHome(fhirConfigDir); - - try { - bootstrapTenant(tenantId); - } finally { - DatasourceSessions.shutdown(); - } - } catch (Exception x) { - logger.log(Level.SEVERE, "bootstrap failed for tenant: " + tenantId, x); - } - } -} diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CqlDataUtil.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CqlDataUtil.java index 5912870b6dc..78ec1521bde 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CqlDataUtil.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CqlDataUtil.java @@ -19,6 +19,10 @@ public class CqlDataUtil { private static final String NAME_PATTERN_RGX = "[a-zA-Z_]\\w*$"; private static final Pattern NAME_PATTERN = Pattern.compile(NAME_PATTERN_RGX); + // Note that this is just to check that only Base64 characters are used + private static final String B64_CHARS_RGX = "[a-zA-Z0-9+/=]+$"; + private static final Pattern B64_CHAR_PATTERN = Pattern.compile(B64_CHARS_RGX); + /** * Asserts that the given id is safe and will not escape a Cql statement * In this case, we can simply assert that it's a valid FHIR identifier @@ -32,6 +36,13 @@ public static void safeId(String id) { throw new IllegalArgumentException("Invalid identifier"); } } + + public static void safeBase64(String value) { + if (value == null || !isBase64Chars(value)) { + logger.log(Level.SEVERE, "Invalid characters for Base64: " + value); + throw new IllegalArgumentException("Invalid Base64"); + } + } /** * Check that the name is a valid object name for Cassandra. @@ -42,4 +53,15 @@ public static boolean isValidName(String name) { Matcher m = NAME_PATTERN.matcher(name); return m.matches() && name.length() <= 128; } + + /** + * Check that the name contains only characters used in a Base64 encoded + * string. 
Note that this does not assert that the string is valid Base64. + * @param name + * @return + */ + public static boolean isBase64Chars(String name) { + Matcher m = B64_CHAR_PATTERN.matcher(name); + return m.matches(); + } } \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CqlReadResource.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CqlReadResource.java deleted file mode 100644 index c972281dc22..00000000000 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CqlReadResource.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * (C) Copyright IBM Corp. 2020, 2021 - * - * SPDX-License-Identifier: Apache-2.0 - */ - -package com.ibm.fhir.persistence.cassandra.cql; - -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.zip.GZIPInputStream; - -import com.datastax.oss.driver.api.core.CqlSession; -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; -import com.ibm.fhir.model.format.Format; -import com.ibm.fhir.model.parser.FHIRParser; -import com.ibm.fhir.model.parser.exception.FHIRParserException; -import com.ibm.fhir.model.resource.Resource; -import com.ibm.fhir.persistence.cassandra.payload.CqlChunkedPayloadStream; -import com.ibm.fhir.persistence.exception.FHIRPersistenceException; - -/** - * Reads the latest version of a resource - */ -public class CqlReadResource implements ICqlReader { - private static final Logger logger = Logger.getLogger(CqlReadResource.class.getName()); - - // The partition_id key value - private final String partitionId; - - // The resource_type_id key value - private final int resourceTypeId; - - // The logical_id key value - private final String logicalId; - - /** - * Public constructor - * 
@param partitionId - * @param resourceTypeId - * @param logicalId - */ - public CqlReadResource(String partitionId, int resourceTypeId, String logicalId) { - CqlDataUtil.safeId(partitionId); - CqlDataUtil.safeId(logicalId); - this.partitionId = partitionId; - this.resourceTypeId = resourceTypeId; - this.logicalId = logicalId; - } - - @Override - public Resource run(CqlSession session) throws FHIRPersistenceException { - - // Firstly, look up the payload_id for the latest version of the resource - final String CQL_LOGICAL_RESOURCE = "" - + "SELECT payload_id, current_version, last_modified FROM logical_resources " - + " WHERE partition_id = " + partitionId - + " AND resource_type_id = " + resourceTypeId - + " AND logical_id = ? "; - - ResultSet lrResult = session.execute(CQL_LOGICAL_RESOURCE); - Row row = lrResult.one(); - if (row != null) { - - final String CQL_PAYLOAD_CHUNKS = "" - + " SELECT ordinal, chunk " - + " FROM payload_chunks " - + " WHERE partition_id = ? " - + " AND payload_id = ? " - + "ORDER BY ordinal "; - - ResultSet chunks = session.execute(CQL_PAYLOAD_CHUNKS); - - try { - // Read the result rows as a continuous stream - InputStream in = new GZIPInputStream(new CqlChunkedPayloadStream(chunks)); - return FHIRParser.parser(Format.JSON).parse(new InputStreamReader(in, StandardCharsets.UTF_8)); - } catch (IOException x) { - logger.log(Level.SEVERE, "Error reading resource partition_id=" + partitionId + ", resourceTypeId=" + resourceTypeId - + ", logicalId=" + logicalId); - throw new CqlPersistenceException("error reading resource", x); - } catch (FHIRParserException x) { - // particularly bad...resources are validated before being saved, so if we get a - // parse failure here that's not IO related, it's not good (database altered?) 
- logger.log(Level.SEVERE, "Error parsing resource partition_id=" + partitionId + ", resourceTypeId=" + resourceTypeId - + ", logicalId=" + logicalId); - throw new CqlPersistenceException("parse resource failed", x); - } - } else { - // resource not found - if (logger.isLoggable(Level.FINE)) { - logger.fine("Resource not found; partition_id=" + partitionId + ", resourceTypeId=" + resourceTypeId - + ", logicalId=" + logicalId); - } - return null; - } - } -} \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CqlSessionWrapper.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CqlSessionWrapper.java new file mode 100644 index 00000000000..3a1a687172d --- /dev/null +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CqlSessionWrapper.java @@ -0,0 +1,112 @@ +/* + * (C) Copyright IBM Corp. 2021 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.cql; + +import java.util.Optional; +import java.util.concurrent.CompletionStage; +import java.util.logging.Logger; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.context.DriverContext; +import com.datastax.oss.driver.api.core.metadata.Metadata; +import com.datastax.oss.driver.api.core.metrics.Metrics; +import com.datastax.oss.driver.api.core.session.Request; +import com.datastax.oss.driver.api.core.type.reflect.GenericType; + +/** + * A wrapper so that we can intercept {@link #close()} calls + */ +public class CqlSessionWrapper implements CqlSession { + private static final Logger logger = Logger.getLogger(CqlSessionWrapper.class.getName()); + + // the actual CqlSession we delegate calls to + private final CqlSession delegate; + + /** + * Public constructor + * @param delegate + */ + public CqlSessionWrapper(CqlSession delegate) { + this.delegate 
= delegate; + } + + @Override + public String getName() { + return delegate.getName(); + } + + @Override + public Metadata getMetadata() { + return delegate.getMetadata(); + } + + @Override + public boolean isSchemaMetadataEnabled() { + return delegate.isSchemaMetadataEnabled(); + } + + @Override + public CompletionStage setSchemaMetadataEnabled(Boolean newValue) { + return delegate.setSchemaMetadataEnabled(newValue); + } + + @Override + public CompletionStage refreshSchemaAsync() { + return delegate.refreshSchemaAsync(); + } + + @Override + public CompletionStage checkSchemaAgreementAsync() { + return delegate.checkSchemaAgreementAsync(); + } + + @Override + public DriverContext getContext() { + return delegate.getContext(); + } + + @Override + public Optional getKeyspace() { + return delegate.getKeyspace(); + } + + @Override + public Optional getMetrics() { + return delegate.getMetrics(); + } + + @Override + public ResultT execute(RequestT request, GenericType resultType) { + return delegate.execute(request, resultType); + } + + @Override + public CompletionStage closeFuture() { + logger.warning("closeFuture called on session wrapper"); + return delegate.closeFuture(); + } + + @Override + public CompletionStage closeAsync() { + logger.warning("closeAsync called on session wrapper"); + return delegate.closeAsync(); + } + + @Override + public CompletionStage forceCloseAsync() { + logger.warning("forceCloseAsync called on session wrapper"); + return delegate.forceCloseAsync(); + } + + @Override + public void close() { + // Intercept the close call so that try-with-resource patterns don't actually + // close the session. Only the DatasourceSessions object should close the + // the real CqlSession. 
+ } +} \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CreateSchema.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CreateSchema.java index 762cf6bd9e2..a63bcf2e725 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CreateSchema.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/CreateSchema.java @@ -6,10 +6,9 @@ package com.ibm.fhir.persistence.cassandra.cql; -import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.LOGICAL_RESOURCES; import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.PAYLOAD_CHUNKS; -import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.PAYLOAD_TRACKING; -import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.PAYLOAD_RECONCILIATION; +import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.RESOURCE_PAYLOADS; +import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.RESOURCE_VERSIONS; import java.util.logging.Logger; @@ -46,8 +45,7 @@ public void run(CqlSession session) { useKeyspace(session); createLogicalResourcesTable(session); createPayloadChunksTable(session); - createPayloadTrackingTable(session); - createPayloadReconciliationTable(session); + createResourceVersionsTable(session); logger.info("Schema definition complete for keySpace '" + this.keySpace + "'"); } @@ -75,88 +73,63 @@ protected void useKeyspace(CqlSession session) { } protected void createLogicalResourcesTable(CqlSession session) { - // partition by partition_id (application-defined, like patient logical id) - // cluster within each partition by resource_type_id, logical_id, version - final String cql = "CREATE TABLE IF NOT EXISTS " + LOGICAL_RESOURCES + " (" - + "partition_id text, " - + "resource_type_id int, " - + "logical_id text, " - + "version int, " - + "last_modified timestamp, " - + "payload_id 
text, " - + "chunk blob, " - + "parameter_block blob, " - + "PRIMARY KEY (partition_id, resource_type_id, logical_id, version)" - + ") WITH CLUSTERING ORDER BY (resource_type_id ASC, logical_id ASC, version DESC)"; + // partition by resource_type_id, logical_id, version + // The resource_payload_key is assigned by the server and is used to help + // manage rollbacks in a concurrent scenario (so the rollback only removes + // rows created within the transaction that was rolled back). + final String cql = "CREATE TABLE IF NOT EXISTS " + RESOURCE_PAYLOADS + " (" + + "resource_type_id int, " + + "logical_id text, " + + "version int, " + + "resource_payload_key text, " + + "last_modified timestamp, " + + "chunk blob, " + + "parameter_block blob, " + + "PRIMARY KEY ((resource_type_id, logical_id, version), resource_payload_key)" + + ") WITH CLUSTERING ORDER BY (resource_payload_key ASC)"; logger.info("Running: " + cql); session.execute(cql); } /** - * In Cassandra, blobs are limited to 2GB, but the document states that the practical limit - * is less than 1MB. To avoid issues when storing arbitrarily large FHIR resources, the - * payload is compressed and stored as a series of chunks each <= 1MB. Chunk order is - * maintained by the ordinal field. + * Create a table to identify the versions associated with each logical resource. + * The partition id for this table is based on only {resource_type_id, logical_id} + * allowing the application to select the list of versions. 
The versions are also + * clustered with descending order, making it trivial to identify the latest version * @param session */ - protected void createPayloadChunksTable(CqlSession session) { - // partition by partition_id (application-defined, like patient logical id) - // cluster within each partition by resource_type_id, payload_id - final String cql = "CREATE TABLE IF NOT EXISTS " + PAYLOAD_CHUNKS + " (" - + "payload_id text, " - + "ordinal int, " - + "chunk blob, " - + "PRIMARY KEY (payload_id, ordinal)" - + ") WITH CLUSTERING ORDER BY (ordinal ASC)"; - - logger.info("Running: " + cql); - session.execute(cql); - } - - /** - * Create the table to track the insertion of payload records. This is used - * by the reconciliation process to make sure that payload records are attached - * to a logical resource in the RDBMS system of record. Records in this table - * can be removed once they have been reconciled, but this is not required. - * @param session - */ - protected void createPayloadTrackingTable(CqlSession session) { - // partition by partition_id (application-defined, like patient logical id) - // cluster within each partition by resource_type_id, payload_id - final String cql = "CREATE TABLE IF NOT EXISTS " + PAYLOAD_TRACKING + " (" - + "partition_id smallint, " - + "tstamp bigint, " - + "resource_type_id int, " - + "logical_id text, " - + "version int, " - + "payload_partition_id text, " - + "PRIMARY KEY (partition_id, tstamp, resource_type_id, logical_id, version)" - + ") WITH CLUSTERING ORDER BY (tstamp ASC)"; + protected void createResourceVersionsTable(CqlSession session) { + // partition by resource_type_id, logical_id + final String cql = "CREATE TABLE IF NOT EXISTS " + RESOURCE_VERSIONS + " (" + + "resource_type_id int, " + + "logical_id text, " + + "version int, " + + "resource_payload_key text, " + + "PRIMARY KEY ((resource_type_id, logical_id), version, resource_payload_key)" + + ") WITH CLUSTERING ORDER BY (version DESC)"; logger.info("Running: 
" + cql); session.execute(cql); } /** - * Create the table to track the reconciliation of payload records. This tells - * the reconciliation service where to start scanning the payload_tracking table - * within each partition. The reconciliation scanner needs to be careful to - * avoid issues with clock drift in clusters so should stop attempting to - * reconcile records more recent than the ingestion transaction timeout value (e.g. - * 2 minutes by default). Some systems may be configured with even larger - * timeouts, so this must be taken into account. This is to ensure that - * reconciliation doesn't miss records which appear after but with timestamps before - * the latest tstamp in a given partition. Handling this doesn't generate any - * logical inconsistencies, but may require a small amount of work to be repeated. + * In Cassandra, blobs are limited to 2GB, but the document states that the practical limit + * is less than 1MB. To avoid issues when storing arbitrarily large FHIR resources, the + * payload is compressed and stored as a series of chunks each <= 1MB. Chunk order is + * maintained by the ordinal field. 
* @param session */ - protected void createPayloadReconciliationTable(CqlSession session) { - final String cql = "CREATE TABLE IF NOT EXISTS " + PAYLOAD_RECONCILIATION + " (" - + "partition_id smallint, " // FK to payload_tracking table - + "tstamp bigint " // FK to payload_tracking table - + "PRIMARY KEY (partition_id, tstamp)" - + ") WITH CLUSTERING ORDER BY (tstamp ASC)"; + protected void createPayloadChunksTable(CqlSession session) { + // The resource_payload_key is unique for each RESOURCE_PAYLOADS record + // so is used here a foreign key (parent) and also acts as the partition + // id for this table + final String cql = "CREATE TABLE IF NOT EXISTS " + PAYLOAD_CHUNKS + " (" + + "resource_payload_key text, " + + "ordinal int, " + + "chunk blob, " + + "PRIMARY KEY (resource_payload_key, ordinal)" + + ") WITH CLUSTERING ORDER BY (ordinal ASC)"; logger.info("Running: " + cql); session.execute(cql); diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/DatasourceSessions.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/DatasourceSessions.java index e7ae26b32e6..02a751f250a 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/DatasourceSessions.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/DatasourceSessions.java @@ -23,13 +23,18 @@ import com.ibm.fhir.persistence.cassandra.ContactPoint; /** - * Singleton to manage Cassandra CqlSession connections for each FHIR tenant/datasource + * Singleton to manage Cassandra CqlSession connections for each FHIR tenant/datasource. + * CqlSession holds the state of the cluster and is thread-safe. There should be a single + * value of CqlSession for a given tenant/datasource and this shouldn't be closed by the + * application until shutdown (handled by the EventCallback server lifecycle events). 
*/ public class DatasourceSessions implements EventCallback { private static final Logger logger = Logger.getLogger(DatasourceSessions.class.getName()); + // Map holding one CqlSession instance per tenant/datasource private final ConcurrentHashMap sessionMap = new ConcurrentHashMap<>(); + // so we can reject future requests when shut down private volatile boolean running = true; /** @@ -66,8 +71,10 @@ public static CqlSession getSessionForTenantDatasource() { /** * Get or create the CqlSession connection to Cassandra for the current - * tenant/datasource - * @return + * tenant/datasource. The wrapped instance intercepts calls to {@link AutoCloseable#close()}. + * Users do not need to close the object, but may do so (for instance in + * a try-with-resource pattern). + * @return a wrapped instance of the CqlSession for which {@link AutoCloseable#close()} is a NOP */ private CqlSession getOrCreateSession() { if (!running) { @@ -81,20 +88,35 @@ private CqlSession getOrCreateSession() { TenantDatasourceKey key = new TenantDatasourceKey(tenantId, dsId); // Get the session for this tenant/datasource, or create a new one if needed - return sessionMap.computeIfAbsent(key, DatasourceSessions::newSession); + CqlSession cs = sessionMap.computeIfAbsent(key, DatasourceSessions::newSession); + + // Wrap the session so we can intercept calls to #close + return new CqlSessionWrapper(cs); } /** - * Build a new CqlSession object for the tenant/datasource. + * Build a new CqlSession object for the tenant/datasource tuple described by key. 
* @param key * @return */ private static CqlSession newSession(TenantDatasourceKey key) { - - String dsPropertyName = FHIRConfiguration.PROPERTY_DATASOURCES + "/" + key.getDatasourceId(); + String dsPropertyName = FHIRConfiguration.PROPERTY_PERSISTENCE_PAYLOAD + "/" + key.getDatasourceId(); CassandraPropertyGroupAdapter adapter = getPropertyGroupAdapter(dsPropertyName); return getDatabaseSession(key, adapter, true); } + + /** + * Check if payload persistence is configured for the current tenant/datasource + * @return + */ + public static boolean isPayloadPersistenceConfigured() { + final String tenantId = FHIRRequestContext.get().getTenantId(); + final String dsId = FHIRRequestContext.get().getDataStoreId(); + TenantDatasourceKey key = new TenantDatasourceKey(tenantId, dsId); + String dsPropertyName = FHIRConfiguration.PROPERTY_PERSISTENCE_PAYLOAD + "/" + key.getDatasourceId(); + PropertyGroup dsPG = FHIRConfigHelper.getPropertyGroup(dsPropertyName); + return dsPG != null; + } /** * Get a CassandraPropertyGroupAdapter bound to the property group described by @@ -151,8 +173,12 @@ private static CqlSession getDatabaseSession(TenantDatasourceKey key, CassandraP builder.withLocalDatacenter(adapter.getLocalDatacenter()); if (setKeyspace) { - // Use the tenant id value directly as for the keyspace - builder.withKeyspace(key.getTenantId()); + String tenantKeyspace = adapter.getTenantKeyspace(); + if (tenantKeyspace == null || tenantKeyspace.isEmpty()) { + // Use the tenant id value directly as for the keyspace + tenantKeyspace = key.getTenantId(); + } + builder.withKeyspace(tenantKeyspace); } return builder.build(); @@ -167,7 +193,7 @@ private static CqlSession getDatabaseSession(TenantDatasourceKey key, CassandraP */ public static CqlSession getSessionForBootstrap(String tenantId, String dsId) { - String dsPropertyName = FHIRConfiguration.PROPERTY_DATASOURCES + "/" + dsId; + String dsPropertyName = FHIRConfiguration.PROPERTY_PERSISTENCE_PAYLOAD + "/" + dsId; 
TenantDatasourceKey key = new TenantDatasourceKey(tenantId, dsId); CassandraPropertyGroupAdapter adapter = getPropertyGroupAdapter(dsPropertyName); return getDatabaseSession(key, adapter, false); diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/SchemaConstants.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/SchemaConstants.java index b85a58a8583..7208aade168 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/SchemaConstants.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/cql/SchemaConstants.java @@ -14,7 +14,8 @@ public class SchemaConstants { // Break binary data into bite-sized pieces when storing public static final int CHUNK_SIZE = 1024 * 1024; - public static final String LOGICAL_RESOURCES = "logical_resources"; + public static final String RESOURCE_PAYLOADS = "resource_payloads"; + public static final String RESOURCE_VERSIONS = "resource_versions"; public static final String PAYLOAD_CHUNKS = "payload_chunks"; public static final String PAYLOAD_TRACKING = "payload_tracking"; public static final String PAYLOAD_RECONCILIATION = "payload_reconciliation"; diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlChunkedPayloadStream.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlChunkedPayloadStream.java index 1f1d7fb74c7..60816a885c0 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlChunkedPayloadStream.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlChunkedPayloadStream.java @@ -9,9 +9,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; - -import com.datastax.oss.driver.api.core.cql.ResultSet; -import com.datastax.oss.driver.api.core.cql.Row; +import java.util.logging.Logger; /** 
* Reads the payload from CQL. Cassandra imposes both hard and practical @@ -21,22 +19,20 @@ * presents the results as an ordinary stream of bytes */ public class CqlChunkedPayloadStream extends InputStream { + private static final Logger logger = Logger.getLogger(CqlChunkedPayloadStream.class.getName()); // Buffer containing bytes from the current row of the result private ByteBuffer buffer; - // The ResultSet we are adapting as a stream - private final ResultSet resultSet; - - // Track each ordinal returned so we can detect gaps - private int currentOrdinal = -1; + // The provider we use to get the sequence of buffers to read from + private final IBufferProvider bufferProvider; /** * Public constructor * @param rs */ - public CqlChunkedPayloadStream(ResultSet rs) { - this.resultSet = rs; + public CqlChunkedPayloadStream(IBufferProvider bp) { + this.bufferProvider = bp; } @Override @@ -46,15 +42,24 @@ public int read() throws IOException { if (buffer == null || !buffer.hasRemaining()) { return -1; // no more data } else { - return buffer.get(); // get the next byte, returned as an int + // get the next byte from the buffer which we know will not underflow + return Byte.toUnsignedInt(buffer.get()); } } @Override public int read(byte[] dst, int off, int len) throws IOException { + // remember that read is allowed to return less than len bytes + // although never 0 bytes, unless len == 0 + if (len == 0) { + return 0; + } + + // Fetch a buffer from the ResultSet if there's no data currently refreshBuffer(); if (buffer == null || !buffer.hasRemaining()) { + // Because we just refreshed, if hasRemaining is false, it's EOF return -1; } else { // fetch as much as requested, up to the number of bytes remaining @@ -72,26 +77,8 @@ public int read(byte[] dst, int off, int len) throws IOException { */ private void refreshBuffer() throws IOException { if (buffer == null || !buffer.hasRemaining()) { - Row row = resultSet.one(); - if (row != null) { - if (row.isNull(2)) { - throw 
new IllegalStateException("buffer value must not be null"); - } - - // check for gaps...which would break what is supposed to be a continuous stream of data - int rowOrdinal = row.getInt(1); - int gap = rowOrdinal - currentOrdinal; - if (gap != 1) { - throw new IOException("Gap in chunk ordinal. ResultSet not ordered by ordinal, or a row is missing"); - } - - // column 2 of the result set should be a blob which we can consume as - // a byte buffer - this.buffer = row.getByteBuffer(2); - } else { - // no more data - this.buffer = null; - } + // get the next buffer in the sequence, or null at the end + buffer = bufferProvider.nextBuffer(); } } } \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlDeletePayload.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlDeletePayload.java new file mode 100644 index 00000000000..90a7d619c60 --- /dev/null +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlDeletePayload.java @@ -0,0 +1,228 @@ +/* + * (C) Copyright IBM Corp. 
2021, 2022 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.payload; + +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.bindMarker; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.deleteFrom; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; +import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.PAYLOAD_CHUNKS; +import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.RESOURCE_PAYLOADS; +import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.RESOURCE_VERSIONS; + +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.BoundStatement; +import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder; +import com.datastax.oss.driver.api.core.cql.PreparedStatement; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.querybuilder.select.Select; +import com.ibm.fhir.persistence.exception.FHIRPersistenceException; +import com.ibm.fhir.persistence.jdbc.exception.FHIRPersistenceDataAccessException; + +/** + * DAO to delete all the records associated with this resource payload + */ +public class CqlDeletePayload { + private static final Logger logger = Logger.getLogger(CqlDeletePayload.class.getName()); + + // The int id representing the resource type (much shorter than the string name) + private final int resourceTypeId; + + // The logical identifier we have assigned to the resource + private final String logicalId; + + // The version, or null for all versions + private final Integer version; + + // The resourcePayloadKey value, or null if all matches should be deleted + private 
final String resourcePayloadKey; + + /** + * Public constructor + * @param resourceTypeId + * @param logicalId + * @param version + */ + public CqlDeletePayload(int resourceTypeId, String logicalId, Integer version, String resourcePayloadKey) { + this.resourceTypeId = resourceTypeId; + this.logicalId = logicalId; + this.version = version; + this.resourcePayloadKey = resourcePayloadKey; + } + + /** + * Hard delete the payload records for the configured resource. + * @param session + */ + public void run(CqlSession session) throws FHIRPersistenceException { + + // We need to drive the deletion of both resource_payloads and + // payload_chunks using a select from resource_versions to obtain + // the resource_payload_key. Using this key ensures we can maintain + // (eventual) consistency between the tables in a non-transactional + // system with concurrent activity + final BoundStatement bs; + if (version != null) { + if (resourcePayloadKey != null) { + final Select statement = + selectFrom("resource_versions") + .column("version") + .column("resource_payload_key") + .whereColumn("resource_type_id").isEqualTo(literal(resourceTypeId)) + .whereColumn("logical_id").isEqualTo(bindMarker()) + .whereColumn("version").isEqualTo(bindMarker()) + .whereColumn("resource_payload_key").isEqualTo(bindMarker()) + ; + PreparedStatement ps = session.prepare(statement.build()); + bs = ps.bind(logicalId, version, resourcePayloadKey); + } else { + final Select statement = + selectFrom("resource_versions") + .column("version") + .column("resource_payload_key") + .whereColumn("resource_type_id").isEqualTo(literal(resourceTypeId)) + .whereColumn("logical_id").isEqualTo(bindMarker()) + .whereColumn("version").isEqualTo(bindMarker()) + ; + PreparedStatement ps = session.prepare(statement.build()); + bs = ps.bind(logicalId, version); + } + } else { + // To find the versions, we need to use the resource_versions tables + final Select statement = + selectFrom("resource_versions") + 
.column("version") + .column("resource_payload_key") + .whereColumn("resource_type_id").isEqualTo(literal(resourceTypeId)) + .whereColumn("logical_id").isEqualTo(bindMarker()) + ; + PreparedStatement ps = session.prepare(statement.build()); + bs = ps.bind(logicalId); + } + + try { + ResultSet rs = session.execute(bs); + + // Can't use forEach because we have to propagate checked exceptions + for (Row row = rs.one(); row != null; row = rs.one()) { + // Remove any chunks we've stored using the resource_payload_key + final int version = row.getInt(0); + final String resourcePayloadKey = row.getString(1); + deletePayloadChunks(session, resourcePayloadKey); + + // And the specific resource_payloads row we just selected + deleteResourcePayloads(session, version, resourcePayloadKey); + + // Because resource_versions is used to enumerate the versions, + // for safety reasons we do this delete last + deleteResourceVersion(session, version, resourcePayloadKey); + } + } catch (Exception x) { + logger.log(Level.SEVERE, "delete failed for '" + + resourceTypeId + "/" + logicalId + "/" + version + "'", x); + + // don't propagate potentially sensitive info + throw new FHIRPersistenceDataAccessException("Delete failed. 
See server log for details."); + } + } + + /** + * Hard delete all the resource record entries from the RESOURCE_PAYLOADS table + * for the configured resourceTypeId/logicalId and given version and + * resourcePayloadKey + * @param session + * @param version + * @param resourcePayloadKey + * @throws FHIRPersistenceException + */ + private void deleteResourcePayloads(CqlSession session, int version, String resourcePayloadKey) throws FHIRPersistenceException { + + final SimpleStatement del; + final PreparedStatement ps; + final BoundStatementBuilder bsb; + + del = deleteFrom(RESOURCE_PAYLOADS) + .whereColumn("resource_type_id").isEqualTo(literal(resourceTypeId)) + .whereColumn("logical_id").isEqualTo(bindMarker()) + .whereColumn("version").isEqualTo(bindMarker()) + .whereColumn("resource_payload_key").isEqualTo(bindMarker()) + .build(); + ps = session.prepare(del); + bsb = ps.boundStatementBuilder(logicalId, version, resourcePayloadKey); + + try { + session.execute(bsb.build()); + } catch (Exception x) { + logger.log(Level.SEVERE, "delete from resource_payloads failed for '" + + resourceTypeId + "/" + logicalId + "/" + version + "'", x); + throw new FHIRPersistenceDataAccessException("Failed deleting from " + RESOURCE_PAYLOADS); + } + } + + /** + * Hard delete all the resource record entries from the RESOURCE_PAYLOADS table + * for the configured resourceTypeId/logicalId and given version and + * resourcePayloadKey + * @param session + * @param version + * @param resourcePayloadKey + * @throws FHIRPersistenceException + */ + private void deleteResourceVersion(CqlSession session, int version, String resourcePayloadKey) throws FHIRPersistenceException { + + final SimpleStatement del; + final PreparedStatement ps; + final BoundStatementBuilder bsb; + + del = deleteFrom(RESOURCE_VERSIONS) + .whereColumn("resource_type_id").isEqualTo(literal(resourceTypeId)) + .whereColumn("logical_id").isEqualTo(bindMarker()) + .whereColumn("version").isEqualTo(bindMarker()) + 
.whereColumn("resource_payload_key").isEqualTo(bindMarker()) + .build(); + ps = session.prepare(del); + bsb = ps.boundStatementBuilder(logicalId, version, resourcePayloadKey); + + try { + session.execute(bsb.build()); + } catch (Exception x) { + logger.log(Level.SEVERE, "delete from resource_payloads failed for '" + + resourceTypeId + "/" + logicalId + "/" + version + "'", x); + throw new FHIRPersistenceDataAccessException("Failed deleting from " + RESOURCE_PAYLOADS); + } + } + + /** + * Hard delete all the resource record entries from the PAYLOAD_CHUNKS table + * for the given resourcePayloadKey + * @param session + * @param resourcePayloadKey + * @throws FHIRPersistenceException + */ + private void deletePayloadChunks(CqlSession session, String resourcePayloadKey) throws FHIRPersistenceException { + final SimpleStatement del; + + del = deleteFrom(PAYLOAD_CHUNKS) + .whereColumn("resource_payload_key").isEqualTo(bindMarker()) + .build(); + final PreparedStatement ps = session.prepare(del); + final BoundStatementBuilder bsb = ps.boundStatementBuilder(resourcePayloadKey); + + try { + session.execute(bsb.build()); + } catch (Exception x) { + logger.log(Level.SEVERE, "delete from payload_chunks failed for '" + + resourceTypeId + "/" + logicalId + "[resourcePayloadKey=" + resourcePayloadKey + "]'", x); + throw new FHIRPersistenceDataAccessException("Failed deleting from " + PAYLOAD_CHUNKS); + } + } +} \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlGetCurrentVersion.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlGetCurrentVersion.java index 281afa53e0d..a75e5abad0c 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlGetCurrentVersion.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlGetCurrentVersion.java @@ -1,5 +1,5 @@ /* - * (C) Copyright IBM Corp. 
2020, 2021 + * (C) Copyright IBM Corp. 2020, 2022 * * SPDX-License-Identifier: Apache-2.0 */ @@ -20,15 +20,11 @@ import com.ibm.fhir.persistence.cassandra.cql.CqlDataUtil; /** - * Reads the current version number of a resource. This is used - * during ingestion so that we can track version history. + * Reads the current version number of a resource. */ public class CqlGetCurrentVersion { private static final Logger logger = Logger.getLogger(CqlGetCurrentVersion.class.getName()); - // The partition_id key value - private final String partitionId; - // The resource_type_id key value private final int resourceTypeId; @@ -37,14 +33,11 @@ public class CqlGetCurrentVersion { /** * Public constructor - * @param partitionId * @param resourceTypeId * @param logicalId */ - public CqlGetCurrentVersion(String partitionId, int resourceTypeId, String logicalId) { - CqlDataUtil.safeId(partitionId); + public CqlGetCurrentVersion(int resourceTypeId, String logicalId) { CqlDataUtil.safeId(logicalId); - this.partitionId = partitionId; this.resourceTypeId = resourceTypeId; this.logicalId = logicalId; } @@ -57,14 +50,11 @@ public CqlGetCurrentVersion(String partitionId, int resourceTypeId, String logic public int run(CqlSession session) { int result; - // Firstly, look up the payload_id for the latest version of the resource. The table - // is already ordered by version DESC so we don't need to do any explicit order by to - // get the most recent version. Simply picking the first row which matches the given - // resourceType/logicalId is sufficient. + // The resource_versions table is already ordered by version DESC so we don't need + // to do any explicit order by to get the most recent version. 
Select statement = - selectFrom("logical_resources") + selectFrom("resource_versions") .column("version") - .whereColumn("partition_id").isEqualTo(literal(partitionId)) .whereColumn("resource_type_id").isEqualTo(literal(resourceTypeId)) .whereColumn("logical_id").isEqualTo(bindMarker()) .limit(1) diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlReadResource.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlReadResource.java index 463d7adbdc3..19c9b1bfa7d 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlReadResource.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlReadResource.java @@ -1,5 +1,5 @@ /* - * (C) Copyright IBM Corp. 2020, 2021 + * (C) Copyright IBM Corp. 2020, 2022 * * SPDX-License-Identifier: Apache-2.0 */ @@ -16,7 +16,6 @@ import java.util.List; import java.util.logging.Level; import java.util.logging.Logger; -import java.util.zip.GZIPInputStream; import com.datastax.oss.driver.api.core.CqlSession; import com.datastax.oss.driver.api.core.cql.PreparedStatement; @@ -24,16 +23,12 @@ import com.datastax.oss.driver.api.core.cql.Row; import com.datastax.oss.driver.api.core.metadata.schema.ClusteringOrder; import com.datastax.oss.driver.api.querybuilder.select.Select; -import com.ibm.fhir.model.format.Format; -import com.ibm.fhir.model.parser.FHIRJsonParser; -import com.ibm.fhir.model.parser.FHIRParser; import com.ibm.fhir.model.parser.exception.FHIRParserException; import com.ibm.fhir.model.resource.Resource; -import com.ibm.fhir.model.util.FHIRUtil; -import com.ibm.fhir.persistence.cassandra.cql.CqlDataUtil; +import com.ibm.fhir.persistence.FHIRPersistenceSupport; import com.ibm.fhir.persistence.cassandra.cql.CqlPersistenceException; import com.ibm.fhir.persistence.exception.FHIRPersistenceException; -import com.ibm.fhir.search.SearchConstants; +import 
com.ibm.fhir.persistence.util.InputOutputByteStream; /** * CQL command to read a FHIR resource stored in Cassandra. @@ -41,9 +36,6 @@ public class CqlReadResource { private static final Logger logger = Logger.getLogger(CqlReadResource.class.getName()); - // The partition_id key value - private final String partitionId; - // The resource_type_id key value private final int resourceTypeId; @@ -52,25 +44,32 @@ public class CqlReadResource { // The version for the resource private final int version; + + // The unique key used to always correctly tie this record to the RDBMS + private final String resourcePayloadKey; + // Elements for subsetting the resource during parse private final List elements; + + // Is the payload compressed when stored + private final boolean payloadCompressed; /** * Public constructor - * @param partitionId * @param resourceTypeId * @param logicalId * @param version + * @param resourcePayloadKey * @param elements + * @param payloadCompressed */ - public CqlReadResource(String partitionId, int resourceTypeId, String logicalId, int version, List elements) { - CqlDataUtil.safeId(partitionId); - CqlDataUtil.safeId(logicalId); - this.partitionId = partitionId; + public CqlReadResource(int resourceTypeId, String logicalId, int version, String resourcePayloadKey, List elements, boolean payloadCompressed) { this.resourceTypeId = resourceTypeId; this.logicalId = logicalId; this.version = version; + this.resourcePayloadKey = resourcePayloadKey; this.elements = elements; + this.payloadCompressed = payloadCompressed; } /** @@ -83,53 +82,49 @@ public CqlReadResource(String partitionId, int resourceTypeId, String logicalId, public T run(Class resourceType, CqlSession session) throws FHIRPersistenceException { T result; - // Firstly, look up the payload_id for the latest version of the resource. The table - // is already ordered by version DESC so we don't need to do any explicit order by to - // get the most recent version. 
Simply picking the first row which matches the given - // resourceType/logicalId is sufficient. + // Read the resource record, and if the payload was stored in-line, we + // can process the chunk directly. If chunk is null, we have to read + // from the payload_chunks table instead Select statement = - selectFrom("logical_resources") - .column("payload_id") + selectFrom("resource_payloads") .column("chunk") - .whereColumn("partition_id").isEqualTo(literal(partitionId)) .whereColumn("resource_type_id").isEqualTo(literal(resourceTypeId)) .whereColumn("logical_id").isEqualTo(bindMarker()) .whereColumn("version").isEqualTo(bindMarker()) + .whereColumn("resource_payload_key").isEqualTo(bindMarker()) .limit(1) ; PreparedStatement ps = session.prepare(statement.build()); try { - ResultSet lrResult = session.execute(ps.bind(logicalId, version)); + ResultSet lrResult = session.execute(ps.bind(logicalId, version, resourcePayloadKey)); Row row = lrResult.one(); if (row != null) { // If the chunk is small, it's stored in the LOGICAL_RESOURCS record. If // it's big, then it requires multiple fetches - String payloadId = row.getString(1); - if (payloadId != null) { - // Big payload split into multiple chunks. Requires a separate read - return readFromChunks(resourceType, session, payloadId); + ByteBuffer bb = row.getByteBuffer(0); + if (bb != null) { + // The payload is small enough to fit in the current row + InputOutputByteStream readStream = new InputOutputByteStream(bb); + result = FHIRPersistenceSupport.parse(resourceType, readStream.inputStream(), this.elements, this.payloadCompressed); } else { - // The payload is small enough to fit in the current row, so no need for an - // extra read - ByteBuffer bb = row.getByteBuffer(2); - try (InputStream in = new GZIPInputStream(new CqlPayloadStream(bb))) { - result = parseStream(resourceType, in); - } + // Big payload split into multiple chunks. 
Requires a separate read + return readFromChunks(resourceType, session); } } else { // resource doesn't exist. result = null; } } catch (IOException x) { - logger.log(Level.SEVERE, "Error reading resource partition_id=" + partitionId + ", resourceTypeId=" + resourceTypeId + logger.log(Level.SEVERE, "Error reading resource resourceTypeId=" + resourceTypeId + ", logicalId=" + logicalId); throw new CqlPersistenceException("error reading resource", x); } catch (FHIRParserException x) { // particularly bad...resources are validated before being saved, so if we get a - // parse failure here that's not IO related, it's not good (database altered?) - logger.log(Level.SEVERE, "Error parsing resource partition_id=" + partitionId + ", resourceTypeId=" + resourceTypeId + // parse failure here that's not IO related, it's not good (database altered or + // inconsistent?) + logger.log(Level.SEVERE, "Error parsing resource resourceTypeId=" + resourceTypeId + ", logicalId=" + logicalId); throw new CqlPersistenceException("parse resource failed", x); } @@ -141,47 +136,21 @@ public T run(Class resourceType, CqlSession session) thr * Read the resource payload from the payload_chunks table * @param resourceType * @param session - * @param payloadId * @return */ - private T readFromChunks(Class resourceType, CqlSession session, String payloadId) throws IOException, FHIRParserException { + private T readFromChunks(Class resourceType, CqlSession session) throws IOException, FHIRParserException { Select statement = selectFrom("payload_chunks") + .column("ordinal") .column("chunk") - .whereColumn("partition_id").isEqualTo(literal(partitionId)) - .whereColumn("payload_id").isEqualTo(bindMarker()) + .whereColumn("resource_payload_key").isEqualTo(bindMarker()) .orderBy("ordinal", ClusteringOrder.ASC) ; PreparedStatement ps = session.prepare(statement.build()); - ResultSet chunks = session.execute(ps.bind(logicalId, version)); - try (InputStream in = new GZIPInputStream(new 
CqlChunkedPayloadStream(chunks))) { - return parseStream(resourceType, in); - } - } - - /** - * Parse the input stream, processing for elements if needed - * @param - * @param resourceType - * @param in - * @return - * @throws IOException - * @throws FHIRParserException - */ - private T parseStream(Class resourceType, InputStream in) throws IOException, FHIRParserException { - T result; - if (elements != null) { - // parse/filter the resource using elements - result = FHIRParser.parser(Format.JSON).as(FHIRJsonParser.class).parseAndFilter(in, elements); - if (resourceType.equals(result.getClass()) && !FHIRUtil.hasTag(result, SearchConstants.SUBSETTED_TAG)) { - // add a SUBSETTED tag to this resource to indicate that its elements have been filtered - result = FHIRUtil.addTag(result, SearchConstants.SUBSETTED_TAG); - } - } else { - result = FHIRParser.parser(Format.JSON).parse(in); + ResultSet chunks = session.execute(ps.bind(resourcePayloadKey)); + try (InputStream in = new CqlChunkedPayloadStream(new ResultSetBufferProvider(chunks, 1))) { + return FHIRPersistenceSupport.parse(resourceType, in, this.elements, this.payloadCompressed); } - - return result; } } \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlStorePayload.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlStorePayload.java index 8d97dfaa542..e8837783779 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlStorePayload.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/CqlStorePayload.java @@ -1,5 +1,5 @@ /* - * (C) Copyright IBM Corp. 2020, 2021 + * (C) Copyright IBM Corp. 
2020, 2022 * * SPDX-License-Identifier: Apache-2.0 */ @@ -10,14 +10,13 @@ import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.insertInto; import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.CHUNK_SIZE; -import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.LOGICAL_RESOURCES; import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.PAYLOAD_CHUNKS; +import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.RESOURCE_PAYLOADS; +import static com.ibm.fhir.persistence.cassandra.cql.SchemaConstants.RESOURCE_VERSIONS; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; -import java.util.UUID; import java.util.logging.Level; import java.util.logging.Logger; @@ -39,9 +38,7 @@ public class CqlStorePayload { private static final Logger logger = Logger.getLogger(CqlStorePayload.class.getName()); - // Possibly patient, or some other partitioning key - private final String partitionId; - + // The RDBMS identifier for the resource type private final int resourceTypeId; // The logical identifier we have assigned to the resource @@ -52,20 +49,23 @@ public class CqlStorePayload { // The anticipated version private final int version; + + // The unique key value for this resource payload assigned by the server + private final String resourcePayloadKey; /** * Public constructor - * @param partitionId * @param resourceTypeId * @param logicalId * @param version + * @param resourcePayloadKey * @param payloadStream */ - public CqlStorePayload(String partitionId, int resourceTypeId, String logicalId, int version, InputOutputByteStream payloadStream) { - this.partitionId = partitionId; + public CqlStorePayload(int resourceTypeId, String logicalId, int version, String resourcePayloadKey, InputOutputByteStream payloadStream) { this.logicalId = logicalId; this.resourceTypeId = resourceTypeId; 
this.version = version; + this.resourcePayloadKey = resourcePayloadKey; this.payloadStream = payloadStream; } @@ -77,101 +77,138 @@ public CqlStorePayload(String partitionId, int resourceTypeId, String logicalId, * @param session */ public void run(CqlSession session) throws FHIRPersistenceException { - // Random id string used to tie together the resource record to - // the child payload chunk records. This is needed because - // we may split the payload into multiple chunks - but only if + // We may split the payload into multiple chunks - but only if // the payload exceeds the chunk size. If it doesn't, we store // it in the main resource table, avoiding the cost of a second // random read when we need to access it again. - final String payloadId = payloadStream.size() > CHUNK_SIZE ? UUID.randomUUID().toString() : null; - storeResource(session, payloadId); + final boolean storeInline = payloadStream.size() <= CHUNK_SIZE; + storeResourceVersion(session); + storeResource(session, storeInline); - if (payloadId != null) { + if (!storeInline) { // payload too big for the main resource table, so break it // into smaller chunks and store as adjacent rows in a child // table - storePayloadChunks(session, payloadId); + storePayloadChunks(session); } } /** - * Store the resource record + * Store the resource version record * @param session - * @param payloadId the unique id for storing the payload in multiple chunks */ - private void storeResource(CqlSession session, String payloadId) throws FHIRPersistenceException { + private void storeResourceVersion(CqlSession session) throws FHIRPersistenceException { RegularInsert insert = - insertInto(LOGICAL_RESOURCES) - .value("partition_id", literal(partitionId)) - .value("resource_type_id", bindMarker()) + insertInto(RESOURCE_VERSIONS) + .value("resource_type_id", literal(resourceTypeId)) .value("logical_id", bindMarker()) .value("version", bindMarker()) + .value("resource_payload_key", bindMarker()) ; - // If we are given a 
payloadId it means that the payload is too large - // to fit inside a single row, so instead we break it into multiple - // rows in the payload_chunks table, using the payloadId as the key - if (payloadId != null) { - insert.value("payload_id", bindMarker()); - } else { - insert.value("chunk", bindMarker()); + if (logger.isLoggable(Level.FINE)) { + logger.fine("Storing resource version record for '" + + resourceTypeId + "/" + logicalId + "/" + version + "'; size=" + payloadStream.size()); } PreparedStatement ps = session.prepare(insert.build()); - BoundStatementBuilder bsb = ps.boundStatementBuilder(resourceTypeId, logicalId, version); + final BoundStatementBuilder bsb = ps.boundStatementBuilder(logicalId, version, resourcePayloadKey); + + try { + session.execute(bsb.build()); + } catch (Exception x) { + logger.log(Level.SEVERE, "insert into resource_payloads failed for '" + + resourceTypeId + "/" + logicalId + "/" + version + "'", x); + throw new FHIRPersistenceDataAccessException("Failed inserting into " + RESOURCE_PAYLOADS); + } + } + + /** + * Store the resource record + * @param session + * @param storeInline when true, store the payload inline in resource_payloads + */ + private void storeResource(CqlSession session, boolean storeInline) throws FHIRPersistenceException { + RegularInsert insert = + insertInto(RESOURCE_PAYLOADS) + .value("resource_type_id", literal(resourceTypeId)) + .value("logical_id", bindMarker()) + .value("version", bindMarker()) + .value("resource_payload_key", bindMarker()) + ; - if (payloadId != null) { - // payload is too big to go in the main table, so just store the reference id here - bsb.setString(4, payloadId); + if (logger.isLoggable(Level.FINE)) { + logger.fine("Storing payload for '" + + resourceTypeId + "/" + logicalId + "/" + version + "'; size=" + payloadStream.size()); + } + + // If the payload is small enough to fit in a single chunk, we can store + // it in line with the main resource record + if (storeInline) { + insert 
= insert.value("chunk", bindMarker()); + } + + PreparedStatement ps = session.prepare(insert.build()); + final BoundStatementBuilder bsb; + + if (storeInline) { + // small enough, so we bind the payload here + bsb = ps.boundStatementBuilder(logicalId, version, resourcePayloadKey, payloadStream.wrap()); } else { - // small enough, so we store directly in the main logical_resources table - bsb.setByteBuffer(4, payloadStream.wrap()); + // too big to be inlined, so don't include the payload here + bsb = ps.boundStatementBuilder(logicalId, version, resourcePayloadKey); } try { session.execute(bsb.build()); } catch (Exception x) { - logger.log(Level.SEVERE, "insert into logical_resources failed for '" - + partitionId + "/" + resourceTypeId + "/" + logicalId + "/" + version + "'", x); - throw new FHIRPersistenceDataAccessException("Failed inserting into " + LOGICAL_RESOURCES); + logger.log(Level.SEVERE, "insert into resource_payloads failed for '" + + resourceTypeId + "/" + logicalId + "/" + version + "'", x); + throw new FHIRPersistenceDataAccessException("Failed inserting into " + RESOURCE_PAYLOADS); } } /** * Store the payload data as a contiguous set of rows ordered by * an ordinal which, being part of the key, is used to retrieve the data in the same - * order so that the original order. + * order they were inserted. 
* @param session - * @param payloadId */ - private void storePayloadChunks(CqlSession session, String payloadId) { -// + "partition_id text, " -// + "payload_id text, " -// + "ordinal int, " -// + "chunk blob, " + private void storePayloadChunks(CqlSession session) { SimpleStatement statement = insertInto(PAYLOAD_CHUNKS) - .value("partition_id", bindMarker()) - .value("payload_id", bindMarker()) + .value("resource_payload_key", bindMarker()) .value("ordinal", bindMarker()) .value("chunk", bindMarker()) .build(); PreparedStatement ps = session.prepare(statement); - + List> statements = new ArrayList<>(); int ordinal = 0; - int offset = 0; ByteBuffer bb = payloadStream.wrap(); byte[] buffer = new byte[CHUNK_SIZE]; - while (offset < payloadStream.size()) { - // shame we have to copy the array here - int len = Math.min(CHUNK_SIZE, payloadStream.size() - offset); - bb.get(buffer, offset, len); + while (bb.hasRemaining()) { + // shame we have to copy the array here rather than subset it + int len = Math.min(CHUNK_SIZE, bb.remaining()); + bb.get(buffer, 0, len); // read len bytes into the buffer - statements.add(ps.bind(partitionId, payloadId, ordinal++, buffer)); - offset += CHUNK_SIZE; + // Wrap the byte array into a new read-only ByteBuffer + ByteBuffer chunk = ByteBuffer.wrap(buffer, 0, len).asReadOnlyBuffer(); + + // and add the chunk statement to the batch + if (logger.isLoggable(Level.FINE)) { + logger.fine("Payload chunk offset[" + ordinal + "] size = " + len); + } + BoundStatementBuilder bsb = ps.boundStatementBuilder(resourcePayloadKey, ordinal++, chunk); + statements.add(bsb.build()); + + if (bb.hasRemaining()) { + // The ByteBuffer adopts the buffer byte array, so we need to make sure + // we create a new one each time + buffer = new byte[CHUNK_SIZE]; + } } BatchStatement batch = diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/FHIRPayloadPersistenceCassandraImpl.java 
b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/FHIRPayloadPersistenceCassandraImpl.java index ff28bb56769..787f2e523e1 100644 --- a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/FHIRPayloadPersistenceCassandraImpl.java +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/FHIRPayloadPersistenceCassandraImpl.java @@ -1,30 +1,32 @@ /* - * (C) Copyright IBM Corp. 2021 + * (C) Copyright IBM Corp. 2021, 2022 * * SPDX-License-Identifier: Apache-2.0 */ package com.ibm.fhir.persistence.cassandra.payload; -import java.io.IOException; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.Future; import java.util.logging.Level; import java.util.logging.Logger; -import java.util.zip.GZIPOutputStream; import com.datastax.oss.driver.api.core.CqlSession; -import com.ibm.fhir.model.format.Format; -import com.ibm.fhir.model.generator.FHIRGenerator; -import com.ibm.fhir.model.generator.exception.FHIRGeneratorException; +import com.ibm.fhir.config.FHIRConfigHelper; +import com.ibm.fhir.config.FHIRConfiguration; +import com.ibm.fhir.config.FHIRRequestContext; +import com.ibm.fhir.config.PropertyGroup; import com.ibm.fhir.model.resource.Resource; +import com.ibm.fhir.persistence.FHIRPersistenceSupport; +import com.ibm.fhir.persistence.cassandra.CassandraPropertyGroupAdapter; import com.ibm.fhir.persistence.cassandra.cql.DatasourceSessions; +import com.ibm.fhir.persistence.cassandra.cql.TenantDatasourceKey; import com.ibm.fhir.persistence.exception.FHIRPersistenceException; -import com.ibm.fhir.persistence.payload.FHIRPayloadPartitionStrategy; import com.ibm.fhir.persistence.payload.FHIRPayloadPersistence; -import com.ibm.fhir.persistence.payload.PayloadKey; -import com.ibm.fhir.persistence.payload.PayloadPersistenceHelper; +import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse; +import 
com.ibm.fhir.persistence.payload.PayloadPersistenceResult; +import com.ibm.fhir.persistence.payload.PayloadPersistenceResult.Status; import com.ibm.fhir.persistence.util.InputOutputByteStream; @@ -38,30 +40,16 @@ public class FHIRPayloadPersistenceCassandraImpl implements FHIRPayloadPersisten private static final Logger logger = Logger.getLogger(FHIRPayloadPersistenceCassandraImpl.class.getName()); private static final long NANOS = 1000000000L; - // The strategy used to obtain the partition name for a given resource - private final FHIRPayloadPartitionStrategy partitionStrategy; + // ** DO NOT CHANGE** The number of Base64 digits to use in the partition hash (4*6 = 24 bits) + public static final int PARTITION_HASH_BASE64_DIGITS = 4; + + // For Cassandra we always compress the payload + public static final boolean PAYLOAD_COMPRESSED = true; /** * Public constructor - * @param ps the partition strategy - */ - public FHIRPayloadPersistenceCassandraImpl(FHIRPayloadPartitionStrategy ps) { - this.partitionStrategy = ps; - } - - /** - * Gets a partition strategy which uses a constant partition name - * of "default" - * @return the partition strategy */ - public static FHIRPayloadPartitionStrategy defaultPartitionStrategy() { - return new FHIRPayloadPartitionStrategy() { - - @Override - public String getPartitionName() { - return "default"; - } - }; + public FHIRPayloadPersistenceCassandraImpl() { } /** @@ -72,46 +60,61 @@ protected CqlSession getCqlSession() { } @Override - public Future storePayload(String resourceType, int resourceTypeId, String logicalId, int version, Resource resource) + public PayloadPersistenceResponse storePayload(String resourceType, int resourceTypeId, String logicalId, int version, String resourcePayloadKey, Resource resource) throws FHIRPersistenceException { + Future result; + final CassandraPropertyGroupAdapter config = getConfigAdapter(); try (CqlSession session = getCqlSession()) { - // render to a compressed stream and store - 
InputOutputByteStream ioStream = PayloadPersistenceHelper.render(resource, true); - String partitionName = partitionStrategy.getPartitionName(); - CqlStorePayload spl = new CqlStorePayload(partitionName, resourceTypeId, logicalId, version, ioStream); + // Get the IO stream for the rendered resource. + InputOutputByteStream ioStream = FHIRPersistenceSupport.render(resource, PAYLOAD_COMPRESSED); + CqlStorePayload spl = new CqlStorePayload(resourceTypeId, logicalId, version, resourcePayloadKey, ioStream); spl.run(session); - - PayloadKey payloadKey = new PayloadKey(resourceType, resourceTypeId, logicalId, version, partitionName, logicalId, PayloadKey.Status.OK); - return CompletableFuture.completedFuture(payloadKey); + + // TODO actual async behavior + result = CompletableFuture.completedFuture(new PayloadPersistenceResult(Status.OK)); + } catch (Exception x) { + logger.log(Level.SEVERE, "storePayload failed for resource '" + + resourceType + "[" + resourceTypeId + "]/" + logicalId + "/_history/" + version + "'", x); + result = CompletableFuture.completedFuture(new PayloadPersistenceResult(Status.FAILED)); } + return new PayloadPersistenceResponse(resourcePayloadKey, resourceType, resourceTypeId, logicalId, version, result); } @Override - public T readResource(Class resourceType, int resourceTypeId, String logicalId, int version, List elements) throws FHIRPersistenceException { + public T readResource(Class resourceType, String rowResourceTypeName, int resourceTypeId, String logicalId, + int version, String resourcePayloadKey, List elements) throws FHIRPersistenceException { + logger.fine(() -> "readResource " + rowResourceTypeName + "[" + resourceTypeId + "]/" + logicalId + "/_history/" + version); try (CqlSession session = getCqlSession()) { - CqlReadResource spl = new CqlReadResource(partitionStrategy.getPartitionName(), resourceTypeId, logicalId, version, elements); + CqlReadResource spl = new CqlReadResource(resourceTypeId, logicalId, version, resourcePayloadKey, 
elements, PAYLOAD_COMPRESSED); return spl.run(resourceType, session); } } @Override - public Future readResource(Class resourceType, PayloadKey payloadKey) throws FHIRPersistenceException { - + public void deletePayload(String resourceType, int resourceTypeId, String logicalId, Integer version, String resourcePayloadKey) throws FHIRPersistenceException { try (CqlSession session = getCqlSession()) { - // Currently not supporting a real async implementation, so we complete the read - // synchronously here - CqlReadResource spl = new CqlReadResource(payloadKey.getPartitionKey(), - payloadKey.getResourceTypeId(), payloadKey.getLogicalId(), payloadKey.getVersionId(), null); - T resource = spl.run(resourceType, session); - return CompletableFuture.completedFuture(resource); + // Currently not supporting a real async implementation, so we + // process synchronously + CqlDeletePayload spl = new CqlDeletePayload(resourceTypeId, logicalId, version, resourcePayloadKey); + spl.run(session); } } - @Override - public void deletePayload(int resourceTypeId, String logicalId, int version) throws FHIRPersistenceException { - // TODO Auto-generated method stub - + /** + * Get the {@link CassandraPropertyGroupAdapter} describing the configuration to use for + * this payload offload implementation + * @return + */ + private CassandraPropertyGroupAdapter getConfigAdapter() { + final String tenantId = FHIRRequestContext.get().getTenantId(); + final String dsId = FHIRRequestContext.get().getDataStoreId(); + TenantDatasourceKey key = new TenantDatasourceKey(tenantId, dsId); + String dsPropertyName = FHIRConfiguration.PROPERTY_PERSISTENCE_PAYLOAD + "/" + key.getDatasourceId(); + PropertyGroup dsPG = FHIRConfigHelper.getPropertyGroup(dsPropertyName); + + // Wrap the PropertyGroup in an adapter to make it easier to consume + return new CassandraPropertyGroupAdapter(dsPG); } } \ No newline at end of file diff --git 
a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/IBufferProvider.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/IBufferProvider.java new file mode 100644 index 00000000000..452c5f67d84 --- /dev/null +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/IBufferProvider.java @@ -0,0 +1,22 @@ +/* + * (C) Copyright IBM Corp. 2021 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.payload; + +import java.nio.ByteBuffer; + +/** + * Provides a sequential list of buffers which can be iterated over + * to recompose a larger buffer that has been broken into chunks + */ +public interface IBufferProvider { + + /** + * Get the next buffer in the sequence, or null when the end has been reached + * @return + */ + ByteBuffer nextBuffer(); +} diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/ResultSetBufferProvider.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/ResultSetBufferProvider.java new file mode 100644 index 00000000000..7f944386d56 --- /dev/null +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/payload/ResultSetBufferProvider.java @@ -0,0 +1,41 @@ +/* + * (C) Copyright IBM Corp. 
2021 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.payload; + +import java.nio.ByteBuffer; + +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; + +/** + * Provides buffers read from a CQL query result set + */ +public class ResultSetBufferProvider implements IBufferProvider { + + private final ResultSet resultSet; + private final int columnIndex; + + /** + * Public constructor + * @param rs + * @param columnIndex + */ + public ResultSetBufferProvider(ResultSet rs, int columnIndex) { + this.resultSet = rs; + this.columnIndex = columnIndex; + } + + @Override + public ByteBuffer nextBuffer() { + Row row = this.resultSet.one(); + if (row != null) { + return row.getByteBuffer(columnIndex); + } else { + return null; + } + } +} \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/CqlScanResources.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/CqlScanResources.java new file mode 100644 index 00000000000..5c96851e712 --- /dev/null +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/CqlScanResources.java @@ -0,0 +1,105 @@ +/* + * (C) Copyright IBM Corp. 
2020, 2021 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.reconcile; + +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.literal; +import static com.datastax.oss.driver.api.querybuilder.QueryBuilder.selectFrom; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.function.Function; +import java.util.logging.Logger; + +import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.CqlSession; +import com.datastax.oss.driver.api.core.cql.ResultSet; +import com.datastax.oss.driver.api.core.cql.Row; +import com.datastax.oss.driver.api.core.cql.SimpleStatement; +import com.datastax.oss.driver.api.querybuilder.select.Select; +import com.datastax.oss.driver.api.querybuilder.select.Selector; +import com.ibm.fhir.persistence.exception.FHIRPersistenceException; + +/** + * CQL command to read a FHIR resource stored in Cassandra. + */ +public class CqlScanResources { + private static final Logger logger = Logger.getLogger(CqlScanResources.class.getName()); + + // The marker used to determine the start position for the table scan + private final long startToken; + + // A function used to process each record that is fetched + private final Function recordHandler; + + /** + * Public constructor + * @param startToken + * @param recordHandler + */ + public CqlScanResources(long startToken, Function recordHandler) { + this.startToken = startToken; + this.recordHandler = recordHandler; + } + + /** + * Execute the CQL read query and return the Resource for the resourceTypeId, logicalId, version + * tuple. 
+ * @param resourceType + * @param session + * @return + */ + public long run(CqlSession session) throws FHIRPersistenceException { + + logger.fine(() -> "Fetching from start token=" + startToken); + List identifiers = new ArrayList<>(); + List tokenSelectors = new ArrayList<>(); + for (String col: Arrays.asList("resource_type_id","logical_id","version")) { + identifiers.add(CqlIdentifier.fromCql(col)); + tokenSelectors.add(Selector.column(col)); + } + // Scan resources from a previous known point + final Select statement = + selectFrom("resource_payloads") + .function(CqlIdentifier.fromCql("\"token\""), tokenSelectors) + .column("resource_type_id") + .column("logical_id") + .column("version") + .column("resource_payload_key") + .whereTokenFromIds(identifiers).isGreaterThanOrEqualTo(literal(this.startToken)) + .limit(1024) + ; + + SimpleStatement simpleStatement = statement.build(); + ResultSet lrResult = session.execute(simpleStatement); + + long lastToken = startToken; + Iterator it = lrResult.iterator(); + while (it.hasNext()) { + Row row = it.next(); + lastToken = row.getLong(0); + ResourceRecord rec = new ResourceRecord( + row.getInt(1), row.getString(2), row.getInt(3), row.getString(4)); + if (!recordHandler.apply(rec)) { + logger.info("Handler requested we stop processing before the current fetch has completed"); + lastToken = Long.MIN_VALUE; // flag end + break; + } + } + + // This works as long as limit > 1 + if (lastToken == startToken) { + // no more data to be read, so we can flag it's the end + lastToken = Long.MIN_VALUE; + } + + // Return the last token value we read so that the next read can start + // at this point+1 + return lastToken; + } +} \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/PayloadReconciliation.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/PayloadReconciliation.java new file mode 100644 index 
00000000000..1c8d05a7077 --- /dev/null +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/PayloadReconciliation.java @@ -0,0 +1,180 @@ +/* + * (C) Copyright IBM Corp. 2022 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.reconcile; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.logging.Level; +import java.util.logging.Logger; + +import com.datastax.oss.driver.api.core.CqlSession; +import com.ibm.fhir.config.FHIRRequestContext; +import com.ibm.fhir.database.utils.api.ITransaction; +import com.ibm.fhir.database.utils.model.DbType; +import com.ibm.fhir.database.utils.pool.DatabaseSupport; +import com.ibm.fhir.persistence.cassandra.cql.DatasourceSessions; +import com.ibm.fhir.persistence.cassandra.payload.CqlDeletePayload; +import com.ibm.fhir.persistence.exception.FHIRPersistenceException; + +/** + * Implements an algorithm to scan the offload persistence store and check + * that the RDBMS contains the corresponding record. As the RDBMS is the + * source of truth, any records in the persistence store without a + * corresponding record in the RDBMS should be deleted. 
+ */ +public class PayloadReconciliation { + private static final Logger logger = Logger.getLogger(PayloadReconciliation.class.getName()); + + // The tenant id to use in the request context + private final String tenantId; + + // The tenant's datastore (usually "default") + private final String dsId; + + // Scan and report but don't delete + private final boolean dryRun; + + // Provides access to database connections and transaction handling + private final DatabaseSupport dbSupport; + + // Tracking how many resource versions we process + private long totalProcessed = 0; + + // Simple map cache of resource type id to name + final Map resourceTypeMap = new HashMap<>(); + + /** + * Public constructor + * @param tenantId + * @param dsId + * @param dbProperties + * @param dbType + * @param dryRun + */ + public PayloadReconciliation(String tenantId, String dsId, Properties dbProperties, DbType dbType, boolean dryRun) { + this.tenantId = tenantId; + this.dsId = dsId; + this.dryRun = dryRun; + this.dbSupport = new DatabaseSupport(dbProperties, dbType); + this.dbSupport.init(); + } + + /** + * Run the reconciliation process + */ + public void run() throws Exception { + long start = System.nanoTime(); + // Set up the request context for the configured tenant and datastore + FHIRRequestContext.set(new FHIRRequestContext(tenantId, dsId)); + + // Keep processing until we make no more progress + long firstToken = Long.MIN_VALUE; + long lastToken; + do { + // To avoid hundreds of tiny transactions, we process one batch of fetches + // inside a single RDBMS transaction + try (ITransaction tx = dbSupport.getTransaction()) { + try (Connection c = dbSupport.getConnection()) { + lastToken = process(c, firstToken); + firstToken = lastToken + 1; // the next starting point + } catch (SQLException x) { + tx.setRollbackOnly(); + throw dbSupport.getTranslator().translate(x); + } + } + } while (lastToken > Long.MIN_VALUE); + + long end = System.nanoTime(); + double elapsed = (end - start) / 
1e9; + logger.info(String.format("Processed %d records in %5.1f seconds [rate %5.1f resources/second]", + totalProcessed, elapsed, totalProcessed/elapsed)); + } + + /** + * Scan and process resources within the given partition + * @param firstToken + * @return the last token read, or Long.MIN_VALUE when there aren't any more rows to scan + */ + private long process(Connection c, long firstToken) throws Exception { + CqlSession session = DatasourceSessions.getSessionForTenantDatasource(); + CqlScanResources scanner = new CqlScanResources(firstToken, r->processRecord(session, c, r)); + return scanner.run(session); + } + + /** + * Function to process a record retrieved by the scanner + * @param session + * @param connection + * @param record + * @return true to continue scanning, false to stop scanning immediately + */ + private Boolean processRecord(CqlSession session, Connection connection, ResourceRecord record) { + boolean keepGoing = true; + this.totalProcessed++; + if (logger.isLoggable(Level.FINE)) { + logger.fine(getLogRecord(record, "CHECK")); + } + + // Check that we have the resource in the RDBMS configured for + // this tenant + try { + ResourceExistsDAO dao = new ResourceExistsDAO(this.resourceTypeMap, + record.getResourceTypeId(), record.getLogicalId(), record.getVersion(), + record.getResourcePayloadKey()); + if (dao.run(connection)) { + // Found the record, so log it + logger.info(getLogRecord(record, "OK")); + } else { + logger.info(getLogRecord(record, "ORPHAN")); + handleOrphanedRecord(session, record); + } + } catch (Exception x) { + // This probably means there's an issue talking to the database, + // or Cassandra, either of which is fatal so we just have to stop + logger.log(Level.SEVERE, getLogRecord(record, "FAILED"), x); + keepGoing = false; + } + return keepGoing; + } + + /** + * Get a consistent log entry description for the given ResourceRecord + * and status string + * @param record + * @param status a status string of 6 characters or less
+ * @return + */ + private String getLogRecord(ResourceRecord record, String status) { + return String.format("[%6s] %d/%s/%d [%s]", status, + record.getResourceTypeId(), record.getLogicalId(), + record.getVersion(), record.getResourcePayloadKey()); + } + + /** + * Erase the record which exists in the offload payload store + * but not the RDBMS + * @param session + * @param record + */ + private void handleOrphanedRecord(CqlSession session, ResourceRecord record) throws FHIRPersistenceException { + final String action = this.dryRun ? "Would erase" : "Erasing"; + logger.info(String.format("%s orphaned payload %d/%s/%d [%s]", + action, + record.getResourceTypeId(), record.getLogicalId(), + record.getVersion(), record.getResourcePayloadKey())); + + if (!this.dryRun) { + CqlDeletePayload delete = new CqlDeletePayload(record.getResourceTypeId(), + record.getLogicalId(), record.getVersion(), + record.getResourcePayloadKey()); + delete.run(session); + } + } +} diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/ResourceExistsDAO.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/ResourceExistsDAO.java new file mode 100644 index 00000000000..50d50d4dbf2 --- /dev/null +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/ResourceExistsDAO.java @@ -0,0 +1,97 @@ +/* + * (C) Copyright IBM Corp. 
2022 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.reconcile; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Map; + +/** + * DAO to check if the configured resource exists + */ +public class ResourceExistsDAO { + final int resourceTypeId; + final String logicalId; + final int versionId; + final String resourcePayloadKey; + final Map resourceTypeMap; + + /** + * Public constructor + * @param resourceTypeMap + * @param resourceTypeId + * @param logicalId + * @param versionId + * @param resourcePayloadKey + */ + public ResourceExistsDAO(Map resourceTypeMap, int resourceTypeId, String logicalId, int versionId, String resourcePayloadKey) { + this.resourceTypeMap = resourceTypeMap; + this.resourceTypeId = resourceTypeId; + this.logicalId = logicalId; + this.versionId = versionId; + this.resourcePayloadKey = resourcePayloadKey; + } + + /** + * Run the query to see if the resource version currently exists. + * Does not care about deletion status, just that the row is there. + * @param c + * @return + * @throws SQLException + */ + public boolean run(Connection c) throws SQLException { + final String resourceTypeName = getResourceTypeName(c); + final String SQL = + "SELECT 1 " + + " FROM " + resourceTypeName + "_RESOURCES R, " + + resourceTypeName + "_LOGICAL_RESOURCES LR " + + " WHERE LR.LOGICAL_ID = ? " + + " AND R.LOGICAL_RESOURCE_ID = LR.LOGICAL_RESOURCE_ID " + + " AND R.VERSION_ID = ?" 
+ + " AND R.RESOURCE_PAYLOAD_KEY = ?"; + + boolean result = false; + try (PreparedStatement ps = c.prepareStatement(SQL)) { + ps.setString(1, logicalId); + ps.setInt(2, versionId); + ps.setString(3, resourcePayloadKey); + ResultSet rs = ps.executeQuery(); + if (rs.next()) { + result = true; + } + } + return result; + } + + /** + * Look up the resource type name for the given id + * @param c + * @return + * @throws SQLException + */ + private String getResourceTypeName(Connection c) throws SQLException { + String result = resourceTypeMap.get(resourceTypeId); + if (result == null) { + final String SQL = + "SELECT resource_type FROM resource_types WHERE resource_type_id = ?"; + + try (PreparedStatement ps = c.prepareStatement(SQL)) { + ps.setInt(1, resourceTypeId); + ResultSet rs = ps.executeQuery(); + if (rs.next()) { + result = rs.getString(1); + resourceTypeMap.put(resourceTypeId, result); + } else { + throw new IllegalArgumentException("Invalid resourceTypeId: " + this.resourceTypeId); + } + } + } + return result; + } +} \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/ResourceRecord.java b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/ResourceRecord.java new file mode 100644 index 00000000000..1f21e913515 --- /dev/null +++ b/fhir-persistence-cassandra/src/main/java/com/ibm/fhir/persistence/cassandra/reconcile/ResourceRecord.java @@ -0,0 +1,60 @@ +/* + * (C) Copyright IBM Corp. 
2022 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.reconcile; + + +/** + * A DTO used to represent a resource record stored in Cassandra + */ +public class ResourceRecord { + private final int resourceTypeId; + private final String logicalId; + private final int version; + private final String resourcePayloadKey; + + /** + * Public constructor + * @param resourceTypeId + * @param logicalId + * @param version + * @param resourcePayloadKey + */ + public ResourceRecord(int resourceTypeId, String logicalId, int version, String resourcePayloadKey) { + this.resourceTypeId = resourceTypeId; + this.logicalId = logicalId; + this.version = version; + this.resourcePayloadKey = resourcePayloadKey; + } + + /** + * @return the resourceTypeId + */ + public int getResourceTypeId() { + return resourceTypeId; + } + + /** + * @return the logicalId + */ + public String getLogicalId() { + return logicalId; + } + + /** + * @return the version + */ + public int getVersion() { + return version; + } + + /** + * @return the resourcePayloadKey + */ + public String getResourcePayloadKey() { + return resourcePayloadKey; + } +} \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/test/java/com/ibm/fhir/persistence/cassandra/test/ChunkTest.java b/fhir-persistence-cassandra/src/test/java/com/ibm/fhir/persistence/cassandra/test/ChunkTest.java new file mode 100644 index 00000000000..a407b8e3004 --- /dev/null +++ b/fhir-persistence-cassandra/src/test/java/com/ibm/fhir/persistence/cassandra/test/ChunkTest.java @@ -0,0 +1,142 @@ +/* + * (C) Copyright IBM Corp. 
2021 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.test; + +import static org.testng.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.List; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; + +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Test; + +import com.ibm.fhir.persistence.cassandra.payload.CqlChunkedPayloadStream; +import com.ibm.fhir.persistence.cassandra.payload.IBufferProvider; +import com.ibm.fhir.persistence.util.InputOutputByteStream; + +/** + * Unit test for the payload chunk write/read cycle + */ +public class ChunkTest { + + String bigString; + + /** + * Build a random string because we don't want GZIP to compress it + */ + @BeforeClass + public void prepare() { + StringBuilder big = new StringBuilder(); + SecureRandom rnd = new SecureRandom(); + for (int i=0; i<1024*1024; i++) { + big.append(rnd.nextLong()); + } + + bigString = big.toString(); + } + + /** + * Test processing a big object written and read as a single chunk + */ + @Test + public void testBigSingleChunk() throws IOException { + + // Render the string to a byte-buffer + InputOutputByteStream iobs = new InputOutputByteStream(4096); + try (GZIPOutputStream os = new GZIPOutputStream(iobs.outputStream())) { + os.write(bigString.getBytes(StandardCharsets.UTF_8)); + os.finish(); + } + + // Decode the whole thing once just so we know what we're doing + try (GZIPInputStream is = new GZIPInputStream(iobs.inputStream())) { + // Keep reading the stream of bytes to recompose the big string + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + int nr; + byte b[] = new byte[4096]; + while ((nr = is.read(b)) >= 0) { + if (nr > 0) { + bos.write(b, 0, nr); + } + } + + 
bos.flush(); + String readBack = new String(bos.toByteArray(), StandardCharsets.UTF_8); + + // Doing it this way because we don't want the huge strings to be printed + // if something went wrong with assertEquals(readBack, bigString) + assertTrue(readBack.equals(bigString)); + } + } + + /** + * Test processing a big object written and read as multiple chunks + */ + @Test + public void testBigMultipleChunks() throws IOException { + + // Render the string to a byte-buffer + InputOutputByteStream iobs = new InputOutputByteStream(4096); + try (GZIPOutputStream os = new GZIPOutputStream(iobs.outputStream())) { + os.write(bigString.getBytes(StandardCharsets.UTF_8)); + os.finish(); + } + + // Break the stream into multiple chunks + List chunks = new ArrayList<>(); + try (InputStream is = iobs.inputStream()) { + int nr; + byte b[] = new byte[4096]; + while ((nr = is.read(b)) >= 0) { + if (nr > 0) { + ByteBuffer bb = ByteBuffer.wrap(b, 0, nr); + chunks.add(bb); + + // The ByteBuffer adopts the byte array, so we need a new one each time + b = new byte[4096]; + } + } + } + + // Wrap our list of chunks in a simple IBufferProvider implementation + // which we can use with our payload stream + IBufferProvider bufferProvider = new IBufferProvider() { + int idx = 0; + @Override + public ByteBuffer nextBuffer() { + return idx < chunks.size() ? 
chunks.get(idx++) : null; + } + }; + + // Decompress the chunked stream + try (GZIPInputStream is = new GZIPInputStream(new CqlChunkedPayloadStream(bufferProvider))) { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + int nr; + byte b[] = new byte[4096]; + while ((nr = is.read(b)) >= 0) { + if (nr > 0) { + bos.write(b, 0, nr); + } + } + + bos.flush(); + String readBack = new String(bos.toByteArray(), StandardCharsets.UTF_8); + + // Doing it this way because we don't want the huge strings to be printed + // if something went wrong with assertEquals(readBack, bigString) + assertTrue(readBack.equals(bigString)); + } + } +} \ No newline at end of file diff --git a/fhir-persistence-cassandra/src/test/java/com/ibm/fhir/persistence/cassandra/test/PayloadStreamTest.java b/fhir-persistence-cassandra/src/test/java/com/ibm/fhir/persistence/cassandra/test/PayloadStreamTest.java new file mode 100644 index 00000000000..430d1395dfc --- /dev/null +++ b/fhir-persistence-cassandra/src/test/java/com/ibm/fhir/persistence/cassandra/test/PayloadStreamTest.java @@ -0,0 +1,80 @@ +/* + * (C) Copyright IBM Corp. 
2021 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.cassandra.test; + +import static com.ibm.fhir.model.type.String.string; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertNotNull; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.testng.annotations.Test; + +import com.ibm.fhir.model.generator.exception.FHIRGeneratorException; +import com.ibm.fhir.model.parser.exception.FHIRParserException; +import com.ibm.fhir.model.resource.Patient; +import com.ibm.fhir.model.type.Narrative; +import com.ibm.fhir.model.type.Reference; +import com.ibm.fhir.model.type.Xhtml; +import com.ibm.fhir.model.type.code.NarrativeStatus; +import com.ibm.fhir.persistence.FHIRPersistenceSupport; +import com.ibm.fhir.persistence.cassandra.payload.CqlPayloadStream; +import com.ibm.fhir.persistence.util.InputOutputByteStream; + +/** + * Unit test for {@link CqlPayloadStream} + */ +public class PayloadStreamTest { + + @Test + public void roundTripNormal() throws IOException, FHIRParserException, FHIRGeneratorException { + Patient patient = Patient.builder() + .id("a-unique-value") + .generalPractitioner(Reference.builder() + .reference(string("Practitioner/1")) + .build()) + .text(Narrative.builder() + .div(Xhtml.of("
Some narrative
")) + .status(NarrativeStatus.GENERATED) + .build()) + .build(); + + InputOutputByteStream iobs = FHIRPersistenceSupport.render(patient, true); + + // Get the data as a ByteBuffer to pretend that it was stored in Cassandra and read back + ByteBuffer bb = iobs.wrap(); + InputOutputByteStream readStream = new InputOutputByteStream(bb); + Patient p = FHIRPersistenceSupport.parse(Patient.class, readStream.inputStream(), null, true); + assertNotNull(p); + assertEquals(p.getId(), patient.getId()); + } + + @Test + public void roundTripNoCompress() throws IOException, FHIRParserException, FHIRGeneratorException { + Patient patient = Patient.builder() + .id("a-unique-value") + .generalPractitioner(Reference.builder() + .reference(string("Practitioner/1")) + .build()) + .text(Narrative.builder() + .div(Xhtml.of("
Some narrative
")) + .status(NarrativeStatus.GENERATED) + .build()) + .build(); + + InputOutputByteStream iobs = FHIRPersistenceSupport.render(patient, false); + + // Get the data as a ByteBuffer to pretend that it was stored in Cassandra and read back + ByteBuffer bb = iobs.wrap(); + InputOutputByteStream readStream = new InputOutputByteStream(bb); + // See if we can read the resource from this stream + Patient p = FHIRPersistenceSupport.parse(Patient.class, readStream.inputStream(), null, false); + assertNotNull(p); + assertEquals(p.getId(), patient.getId()); + } +} \ No newline at end of file diff --git a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSConfigAdapter.java b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSConfigAdapter.java index c7a95e4c59c..21138585bad 100644 --- a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSConfigAdapter.java +++ b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSConfigAdapter.java @@ -71,4 +71,9 @@ public interface COSConfigAdapter { */ default int defaultMaxKeys() { return 1000; } + /** + * Is the payload stored compressed + * @return + */ + boolean isCompress(); } \ No newline at end of file diff --git a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPayloadClient.java b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPayloadClient.java index f113f3bfc0f..097310ae7f2 100644 --- a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPayloadClient.java +++ b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPayloadClient.java @@ -9,10 +9,8 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; -import java.util.function.Function; import java.util.logging.Level; import java.util.logging.Logger; -import java.util.zip.GZIPInputStream; import com.ibm.cloud.objectstorage.AmazonServiceException; import 
com.ibm.cloud.objectstorage.ClientConfiguration; @@ -32,8 +30,10 @@ import com.ibm.cloud.objectstorage.services.s3.model.PutObjectResult; import com.ibm.cloud.objectstorage.services.s3.model.S3Object; import com.ibm.cloud.objectstorage.services.s3.model.S3ObjectInputStream; +import com.ibm.fhir.model.resource.Resource; import com.ibm.fhir.persistence.exception.FHIRPersistenceException; import com.ibm.fhir.persistence.jdbc.exception.FHIRPersistenceDataAccessException; +import com.ibm.fhir.persistence.payload.PayloadReader; import com.ibm.fhir.persistence.util.InputOutputByteStream; /** @@ -96,17 +96,14 @@ public COSPayloadClient(String tenantId, String dsId, CosPropertyGroupAdapter pr * @param fn * @return */ - public T read(String objectName, Function fn) throws FHIRPersistenceException { + public T read(Class resourceType, String objectName, PayloadReader payloadReader) throws FHIRPersistenceException { final String bucketName = getBucketName(); S3Object item = client.getObject(new GetObjectRequest(bucketName, objectName)); if (item != null) { try (S3ObjectInputStream s3InStream = item.getObjectContent()) { - // The resources we store are compressed, so provide the function with - // the decompressed stream. 
Important to close this to avoid leaks - try (InputStream dataStream = new GZIPInputStream(s3InStream)) { - return fn.apply(dataStream); - } + // Delegate actual reading of the resource to the PayloadReader implementation + return payloadReader.read(resourceType, s3InStream); } catch (IOException x) { logger.log(Level.SEVERE, "error closing stream for '" + bucketName + ":" + objectName + "'"); throw new IllegalStateException("error closing object stream", x); diff --git a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPropertiesAdapter.java b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPropertiesAdapter.java index b61cd9e06a9..a6b6fc501ea 100644 --- a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPropertiesAdapter.java +++ b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPropertiesAdapter.java @@ -48,7 +48,8 @@ public String getBucketName() { @Override public boolean isCredentialIBM() { - return "Y".equalsIgnoreCase(properties.getProperty(COSPropertyConstants.COS_CREDENTIAL_IBM)); + final String value = properties.getProperty(COSPropertyConstants.COS_CREDENTIAL_IBM); + return "Y".equalsIgnoreCase(value) || "true".equalsIgnoreCase(value); } @Override @@ -72,4 +73,10 @@ public int getMaxKeys() { return defaultMaxKeys(); } } + + @Override + public boolean isCompress() { + final String value = properties.getProperty(COSPropertyConstants.COS_COMPRESS); + return "Y".equalsIgnoreCase(value) || "true".equalsIgnoreCase(value); + } } \ No newline at end of file diff --git a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPropertyConstants.java b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPropertyConstants.java index f7ae7826aa1..6acac4ec849 100644 --- a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPropertyConstants.java +++ 
b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/COSPropertyConstants.java @@ -36,4 +36,7 @@ public class COSPropertyConstants { // The max keys to return per list objects request public static final String COS_MAX_KEYS = "max.keys"; + + // Compress resources when reading/writing to COS + public static final String COS_COMPRESS = "compress"; } \ No newline at end of file diff --git a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/CosPropertyGroupAdapter.java b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/CosPropertyGroupAdapter.java index 01a0589438d..32dfa249411 100644 --- a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/CosPropertyGroupAdapter.java +++ b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/client/CosPropertyGroupAdapter.java @@ -20,15 +20,16 @@ public class CosPropertyGroupAdapter implements COSConfigAdapter { private static final Logger logger = Logger.getLogger(CosPropertyGroupAdapter.class.getName()); // Property key constants - public static final String PROP_CREDENTIAL_IBM = "credentialIBM"; - public static final String PROP_BUCKET_NAME = "bucketName"; - public static final String PROP_API_KEY = "apiKey"; - public static final String PROP_SRV_INST_ID = "srvInstId"; - public static final String PROP_LOCATION = "location"; - public static final String PROP_ENDPOINT_URL = "endpoint"; - public static final String PROP_REQUEST_TIMEOUT = "requestTimeout"; - public static final String PROP_SOCKET_TIMEOUT = "socketTimeout"; - public static final String PROP_MAX_KEYS = "maxKeys"; + public static final String PROP_CREDENTIAL_IBM = "connectionProperties/credentialIBM"; + public static final String PROP_BUCKET_NAME = "connectionProperties/bucketName"; + public static final String PROP_API_KEY = "connectionProperties/apiKey"; + public static final String PROP_SRV_INST_ID = "connectionProperties/srvInstId"; + public static final String 
PROP_LOCATION = "connectionProperties/location"; + public static final String PROP_ENDPOINT_URL = "connectionProperties/endpoint"; + public static final String PROP_REQUEST_TIMEOUT = "connectionProperties/requestTimeout"; + public static final String PROP_SOCKET_TIMEOUT = "connectionProperties/socketTimeout"; + public static final String PROP_MAX_KEYS = "connectionProperties/maxKeys"; + public static final String PROP_COMPRESS = "compress"; // The property group we are wrapping private final PropertyGroup propertyGroup; @@ -76,6 +77,17 @@ public boolean isCredentialIBM() { } + @Override + public boolean isCompress() { + try { + return propertyGroup.getBooleanProperty(PROP_COMPRESS, false); + } catch (Exception x) { + logger.log(Level.SEVERE, PROP_COMPRESS, x); + throw new IllegalArgumentException("property not configured: " + PROP_COMPRESS); + } + + } + @Override + public String getApiKey() { return getStringProp(PROP_API_KEY); diff --git a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/impl/COSClientManager.java b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/impl/COSClientManager.java index 585e5dcc32f..34b69cb13b7 100644 --- a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/impl/COSClientManager.java +++ b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/impl/COSClientManager.java @@ -107,14 +107,14 @@ private static COSPayloadClient newClient(TenantDatasourceKey key) { throw new IllegalStateException("Unsupported 'type' property value within datasource property group: " + type); } - // Get the connection properties + // Check that the connection properties are configured PropertyGroup connectionProps = dsPG.getPropertyGroup("connectionProperties"); if (connectionProps == null) { throw new IllegalStateException("Could not locate 'connectionProperties' property group within datasource property group: " + dsPropertyName); } - // Wrap the connection properties in an adapter to simplify
access - CosPropertyGroupAdapter adapter = new CosPropertyGroupAdapter(connectionProps); + // Wrap the main properties in an adapter to simplify access + CosPropertyGroupAdapter adapter = new CosPropertyGroupAdapter(dsPG); return new COSPayloadClient(key.getTenantId(), key.getDatasourceId(), adapter); } catch (Exception x) { throw new IllegalStateException(x); diff --git a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/payload/FHIRPayloadPersistenceCosImpl.java b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/payload/FHIRPayloadPersistenceCosImpl.java index 06906a60cfc..33979473762 100644 --- a/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/payload/FHIRPayloadPersistenceCosImpl.java +++ b/fhir-persistence-cos/src/main/java/com/ibm/fhir/persistence/cos/payload/FHIRPayloadPersistenceCosImpl.java @@ -1,5 +1,5 @@ /* - * (C) Copyright IBM Corp. 2021 + * (C) Copyright IBM Corp. 2021, 2022 * * SPDX-License-Identifier: Apache-2.0 */ @@ -12,13 +12,21 @@ import java.util.logging.Level; import java.util.logging.Logger; +import com.ibm.fhir.config.FHIRConfigHelper; +import com.ibm.fhir.config.FHIRConfiguration; +import com.ibm.fhir.config.PropertyGroup; import com.ibm.fhir.model.resource.Resource; +import com.ibm.fhir.persistence.FHIRPersistenceSupport; import com.ibm.fhir.persistence.cos.client.COSPayloadClient; +import com.ibm.fhir.persistence.cos.client.CosPropertyGroupAdapter; import com.ibm.fhir.persistence.cos.impl.COSClientManager; import com.ibm.fhir.persistence.exception.FHIRPersistenceException; import com.ibm.fhir.persistence.payload.FHIRPayloadPersistence; -import com.ibm.fhir.persistence.payload.PayloadKey; -import com.ibm.fhir.persistence.payload.PayloadPersistenceHelper; +import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse; +import com.ibm.fhir.persistence.payload.PayloadPersistenceResult; +import com.ibm.fhir.persistence.payload.PayloadPersistenceResult.Status; +import 
com.ibm.fhir.persistence.payload.PayloadReader; +import com.ibm.fhir.persistence.payload.PayloadReaderImpl; import com.ibm.fhir.persistence.util.InputOutputByteStream; /** @@ -29,41 +37,43 @@ public class FHIRPayloadPersistenceCosImpl implements FHIRPayloadPersistence { private static final Logger logger = Logger.getLogger(FHIRPayloadPersistenceCosImpl.class.getName()); @Override - public Future storePayload(String resourceTypeName, int resourceTypeId, String logicalId, int version, Resource resource) + public PayloadPersistenceResponse storePayload(String resourceTypeName, int resourceTypeId, String logicalId, int version, String resourcePayloadKey, Resource resource) throws FHIRPersistenceException { long start = System.nanoTime(); COSPayloadClient cpc = COSClientManager.getClientForTenantDatasource(); + Future result; + final String objectName = makeObjectName(resourceTypeId, logicalId, version, resourcePayloadKey); try { // Render the object to a byte-stream but don't compress when storing in Cos // (although this could be made a configurable option if we want) - InputOutputByteStream ioStream = PayloadPersistenceHelper.render(resource, false); - final String objectName = makeObjectName(resourceTypeId, logicalId, version); + InputOutputByteStream ioStream = FHIRPersistenceSupport.render(resource, false); cpc.write(objectName, ioStream); - PayloadKey payloadKey = new PayloadKey(resourceTypeName, resourceTypeId, logicalId, version, null, objectName, PayloadKey.Status.OK); - return CompletableFuture.completedFuture(payloadKey); + result = CompletableFuture.completedFuture(new PayloadPersistenceResult(Status.OK)); + } catch (Exception x) { + result = CompletableFuture.completedFuture(new PayloadPersistenceResult(Status.FAILED)); } finally { if (logger.isLoggable(Level.FINE)) { long elapsed = System.nanoTime() - start; logger.fine(String.format("Wrote resource payload to COS: '%s/%s/%d' [took %5.3f s]", resourceTypeName, logicalId, version, elapsed/1e9)); } } + 
PayloadPersistenceResponse response = new PayloadPersistenceResponse(resourcePayloadKey, resourceTypeName, resourceTypeId, logicalId, version, result); + return response; } @Override - public T readResource(Class resourceType, int resourceTypeId, String logicalId, int version, List elements) throws FHIRPersistenceException { + public T readResource(Class resourceType, String rowResourceTypeName, int resourceTypeId, String logicalId, int version, String resourcePayloadKey, List elements) throws FHIRPersistenceException { final long start = System.nanoTime(); COSPayloadClient cpc = COSClientManager.getClientForTenantDatasource(); - final String objectName = makeObjectName(resourceTypeId, logicalId, version); + final CosPropertyGroupAdapter config = getConfigAdapter(); + final String objectName = makeObjectName(resourceTypeId, logicalId, version, resourcePayloadKey); try { - return cpc.read(objectName, is -> PayloadPersistenceHelper.parse(resourceType, is, elements)); - - } catch (RuntimeException x) { - logger.severe("Failed to read payload for: '" + resourceType.getSimpleName() + "/" + logicalId + "/" + version + "', objectName = '" + objectName + "'"); - throw new FHIRPersistenceException("Failed to parse resource", x); + PayloadReader payloadReader = new PayloadReaderImpl(config.isCompress(), elements); + return cpc.read(resourceType, objectName, payloadReader); } finally { if (logger.isLoggable(Level.FINE)) { long elapsed = System.nanoTime() - start; @@ -72,58 +82,31 @@ public T readResource(Class resourceType, int resourceTy } } - @Override - public Future readResource(Class resourceType, PayloadKey payloadKey) throws FHIRPersistenceException { - final long start = System.nanoTime(); - COSPayloadClient cpc = COSClientManager.getClientForTenantDatasource(); - - final String objectName = makeObjectName(payloadKey); - try { - // We're not supporting async behavior yet, so we complete right away - T resource = cpc.read(objectName, is -> 
PayloadPersistenceHelper.parse(resourceType, is, null)); - return CompletableFuture.completedFuture(resource); - } catch (RuntimeException x) { - logger.severe("Failed to read payload for key: '" + payloadKey + "'"); - throw new FHIRPersistenceException("Failed to parse resource", x); - } finally { - if (logger.isLoggable(Level.FINE)) { - long elapsed = System.nanoTime() - start; - logger.fine(String.format("Direct read of resource payload from COS: '%s/%s' [took %5.3f s]", resourceType.getSimpleName(), payloadKey.toString(), elapsed/1e9)); - } - } - } - /** * Generate the COS object name to use for the given set of parameters * @param resourceTypeId * @param logicalId * @param version + * @param resourcePayloadKey * @return */ - private static String makeObjectName(int resourceTypeId, String logicalId, int version) { + private static String makeObjectName(int resourceTypeId, String logicalId, int version, String resourcePayloadKey) { StringBuilder objectName = new StringBuilder(); objectName.append(Integer.toString(resourceTypeId)); objectName.append("/"); objectName.append(logicalId); objectName.append("/"); objectName.append(Integer.toString(version)); + objectName.append("/"); + objectName.append(resourcePayloadKey); return objectName.toString(); } - /** - * Obtain the COS object name using the information encoded in the payloadKey - * @param payloadKey - * @return - */ - private static String makeObjectName(PayloadKey payloadKey) { - return payloadKey.getPayloadId(); - } - @Override - public void deletePayload(int resourceTypeId, String logicalId, int version) throws FHIRPersistenceException { + public void deletePayload(String resourceType, int resourceTypeId, String logicalId, Integer version, String resourcePayloadKey) throws FHIRPersistenceException { COSPayloadClient cpc = COSClientManager.getClientForTenantDatasource(); - final String objectName = makeObjectName(resourceTypeId, logicalId, version); + final String objectName = 
makeObjectName(resourceTypeId, logicalId, version, resourcePayloadKey); try { cpc.delete(objectName); } catch (FHIRPersistenceException x) { @@ -131,4 +114,19 @@ public void deletePayload(int resourceTypeId, String logicalId, int version) thr throw x; } } + + /** + * Get the tenant-specific configuration for COS + * @return + */ + private CosPropertyGroupAdapter getConfigAdapter() { + // get the PropertyGroup for the current tenant/datasource + final String dsId = "default"; // only one payload datasource for COS + String dsPropertyName = FHIRConfiguration.PROPERTY_PERSISTENCE_PAYLOAD + "/" + dsId; + PropertyGroup dsPG = FHIRConfigHelper.getPropertyGroup(dsPropertyName); + if (dsPG == null) { + throw new IllegalStateException("Could not locate configuration property group: " + dsPropertyName); + } + return new CosPropertyGroupAdapter(dsPG); + } } \ No newline at end of file diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRPersistenceJDBCCache.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRPersistenceJDBCCache.java index c1ffaabcee5..09c23f0ab8c 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRPersistenceJDBCCache.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRPersistenceJDBCCache.java @@ -22,6 +22,11 @@ public interface FHIRPersistenceJDBCCache { * @return */ boolean needToPrefill(); + + /** + * Clear the needToPrefill flag - call after the prefill has been done + */ + void clearNeedToPrefill(); /** * Getter for the common token values cache diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRResourceDAOFactory.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRResourceDAOFactory.java index 71bb2efe947..30ffa211339 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRResourceDAOFactory.java +++ 
b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/FHIRResourceDAOFactory.java @@ -18,6 +18,7 @@ import com.ibm.fhir.persistence.exception.FHIRPersistenceException; import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor; import com.ibm.fhir.persistence.jdbc.dao.ReindexResourceDAO; +import com.ibm.fhir.persistence.jdbc.dao.api.FhirSequenceDAO; import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO; import com.ibm.fhir.persistence.jdbc.dao.api.ParameterDAO; import com.ibm.fhir.persistence.jdbc.dao.api.ResourceDAO; @@ -165,4 +166,27 @@ public static ResourceReferenceDAO getResourceReferenceDAO(Connection connection return rrd; } -} + /** + * Get an implementation of {@link FhirSequenceDAO} suitable for the database type described + * by flavor. + * @param connection + * @param flavor + * @return + */ + public static FhirSequenceDAO getSequenceDAO(Connection connection, FHIRDbFlavor flavor) { + FhirSequenceDAO result = null; + switch (flavor.getType()) { + case DB2: + // Derby syntax also works for Db2 + result = new com.ibm.fhir.persistence.jdbc.derby.FhirSequenceDAOImpl(connection); + break; + case DERBY: + result = new com.ibm.fhir.persistence.jdbc.derby.FhirSequenceDAOImpl(connection); + break; + case POSTGRESQL: + result = new com.ibm.fhir.persistence.jdbc.postgres.FhirSequenceDAOImpl(connection); + break; + } + return result; + } +} \ No newline at end of file diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/TransactionData.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/TransactionData.java index 8df98c2d758..af6903384af 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/TransactionData.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/TransactionData.java @@ -1,12 +1,11 @@ /* - * (C) Copyright IBM Corp. 2020 + * (C) Copyright IBM Corp. 
2020, 2021 * * SPDX-License-Identifier: Apache-2.0 */ package com.ibm.fhir.persistence.jdbc; - /** * Used to hold data accumulated by the JDBC persistence layer in the current * transaction. The data is persisted immediately prior to the transaction @@ -19,4 +18,4 @@ public interface TransactionData { * the error and mark the transaction for rollback. */ void persist(); -} +} \ No newline at end of file diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheImpl.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheImpl.java index 4e3a95a903a..5ae9580e1d7 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheImpl.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheImpl.java @@ -96,7 +96,13 @@ public void transactionRolledBack() { @Override public boolean needToPrefill() { - // should return true only ever once - return needToPrefillFlag.getAndSet(false); + // To avoid a race condition at server startup, don't reset + // the flag until the cache has actually been filled + return needToPrefillFlag.get(); + } + + @Override + public void clearNeedToPrefill() { + needToPrefillFlag.set(false); } } \ No newline at end of file diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheUtil.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheUtil.java index 6f8afabee99..d7d43a6a00f 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheUtil.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/cache/FHIRPersistenceJDBCCacheUtil.java @@ -27,8 +27,8 @@ public class FHIRPersistenceJDBCCacheUtil { public static FHIRPersistenceJDBCCache create(int codeSystemCacheSize, int tokenValueCacheSize, 
int canonicalCacheSize) { ICommonTokenValuesCache rrc = new CommonTokenValuesCacheImpl(codeSystemCacheSize, tokenValueCacheSize, canonicalCacheSize); return new FHIRPersistenceJDBCCacheImpl(new NameIdCache(), new IdNameCache(), new NameIdCache(), rrc); - } + /** * Prefill the cache with constants already committed in the database * @param connection diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/connection/FHIRUserTransactionAdapter.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/connection/FHIRUserTransactionAdapter.java index 6bbd9d4e238..7e19cbe873e 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/connection/FHIRUserTransactionAdapter.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/connection/FHIRUserTransactionAdapter.java @@ -47,17 +47,25 @@ public class FHIRUserTransactionAdapter implements FHIRPersistenceTransaction { // support nesting by tracking the number of begin/end requests private int startCount; + // A handler to be called after a transaction has been rolled back + private final Runnable rolledBackHandler; + /** * Public constructor * @param tx + * @param syncRegistry + * @param cache + * @param transactionDataKey + * @param rolledBackHandler */ public FHIRUserTransactionAdapter(UserTransaction tx, TransactionSynchronizationRegistry syncRegistry, FHIRPersistenceJDBCCache cache, - String transactionDataKey) { + String transactionDataKey, Runnable rolledBackHandler) { this.userTransaction = tx; this.syncRegistry = syncRegistry; this.cache = cache; this.transactionDataKey = transactionDataKey; startedByThis = false; + this.rolledBackHandler = rolledBackHandler; } /** @@ -83,7 +91,7 @@ public void begin() throws FHIRPersistenceException { // On starting a new transaction, we need to register a callback so that // the cache is informed when the transaction commits it can promote thread-local // ids to the shared caches. 
- syncRegistry.registerInterposedSynchronization(new CacheTransactionSync(this.syncRegistry, this.cache, this.transactionDataKey)); + syncRegistry.registerInterposedSynchronization(new CacheTransactionSync(this.syncRegistry, this.cache, this.transactionDataKey, this.rolledBackHandler)); } catch (Exception x) { log.log(Level.SEVERE, "failed to start transaction", x); @@ -97,7 +105,7 @@ public void begin() throws FHIRPersistenceException { // On starting a bulk transaction, we need to register a callback so that // the cache is informed when the transaction commits it can promote thread-local // ids to the shared caches. - syncRegistry.registerInterposedSynchronization(new CacheTransactionSync(this.syncRegistry, this.cache, this.transactionDataKey)); + syncRegistry.registerInterposedSynchronization(new CacheTransactionSync(this.syncRegistry, this.cache, this.transactionDataKey, this.rolledBackHandler)); // transaction is already active, so this is a nested request this.startCount++; diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/connection/FHIRUserTransactionFactory.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/connection/FHIRUserTransactionFactory.java index cadd4d796ed..1d8ee594537 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/connection/FHIRUserTransactionFactory.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/connection/FHIRUserTransactionFactory.java @@ -1,5 +1,5 @@ /* - * (C) Copyright IBM Corp. 2020 + * (C) Copyright IBM Corp. 
2020, 2021 * * SPDX-License-Identifier: Apache-2.0 */ @@ -25,20 +25,29 @@ public class FHIRUserTransactionFactory implements FHIRTransactionFactory { private final FHIRPersistenceJDBCCache cache; private final String transactionDataKey; + + private final Runnable rolledBackHandler; /** * Public constructor + * * @param tx + * @param syncReg + * @param cache + * @param transactionDataKey + * @param rolledBackHandler */ - public FHIRUserTransactionFactory(UserTransaction tx, TransactionSynchronizationRegistry syncReg, FHIRPersistenceJDBCCache cache, String transactionDataKey) { + public FHIRUserTransactionFactory(UserTransaction tx, TransactionSynchronizationRegistry syncReg, FHIRPersistenceJDBCCache cache, String transactionDataKey, + Runnable rolledBackHandler) { this.userTransaction = tx; this.syncRegistry = syncReg; this.cache = cache; this.transactionDataKey = transactionDataKey; + this.rolledBackHandler = rolledBackHandler; } @Override public FHIRPersistenceTransaction create() { - return new FHIRUserTransactionAdapter(userTransaction, syncRegistry, cache, transactionDataKey); + return new FHIRUserTransactionAdapter(userTransaction, syncRegistry, cache, transactionDataKey, rolledBackHandler); } } diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/EraseResourceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/EraseResourceDAO.java index 8645e74c835..71aa9e98ff9 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/EraseResourceDAO.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/EraseResourceDAO.java @@ -12,6 +12,8 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; +import java.util.ArrayList; +import java.util.List; import java.util.logging.Level; import java.util.logging.Logger; @@ -20,9 +22,12 @@ import com.ibm.fhir.persistence.ResourceEraseRecord; import com.ibm.fhir.persistence.erase.EraseDTO; import 
com.ibm.fhir.persistence.jdbc.FHIRPersistenceJDBCCache; +import com.ibm.fhir.persistence.jdbc.FHIRResourceDAOFactory; import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor; +import com.ibm.fhir.persistence.jdbc.dao.api.FhirSequenceDAO; import com.ibm.fhir.persistence.jdbc.dao.api.IResourceReferenceDAO; import com.ibm.fhir.persistence.jdbc.dao.impl.ResourceDAOImpl; +import com.ibm.fhir.persistence.jdbc.dto.ErasedResourceRec; import com.ibm.fhir.persistence.jdbc.util.ParameterTableSupport; /** @@ -47,12 +52,15 @@ public class EraseResourceDAO extends ResourceDAOImpl { private static final String CLASSNAME = EraseResourceDAO.class.getSimpleName(); private static final Logger LOG = Logger.getLogger(CLASSNAME); - private static final String CALL_POSTGRES = "{CALL %s.ERASE_RESOURCE(?, ?, ?)}"; - private static final String CALL_DB2 = "CALL %s.ERASE_RESOURCE(?, ?, ?)"; + private static final String CALL_POSTGRES = "{CALL %s.ERASE_RESOURCE(?, ?, ?, ?)}"; + private static final String CALL_DB2 = "CALL %s.ERASE_RESOURCE(?, ?, ?, ?)"; // The translator specific to the database type we're working with private final IDatabaseTranslator translator; + // The name of the admin schema where we find the SV_TENANT_ID variable + private final String adminSchemaName; + private ResourceEraseRecord eraseRecord; private EraseDTO eraseDto; @@ -60,15 +68,17 @@ public class EraseResourceDAO extends ResourceDAOImpl { * Public constructor * * @param conn + * @param adminSchemaName * @param translator * @param schemaName * @param flavor * @param cache * @param rrd */ - public EraseResourceDAO(Connection conn, IDatabaseTranslator translator, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, + public EraseResourceDAO(Connection conn, String adminSchemaName, IDatabaseTranslator translator, String schemaName, FHIRDbFlavor flavor, FHIRPersistenceJDBCCache cache, IResourceReferenceDAO rrd) { super(conn, schemaName, flavor, cache, rrd); + this.adminSchemaName = 
adminSchemaName; this.translator = translator; } @@ -76,16 +86,18 @@ public EraseResourceDAO(Connection conn, IDatabaseTranslator translator, String * Execute the stored procedure/function to erase the content. * * @param callStr the sql that should be executed + * @param erasedResourceGroupId the id used to group together ERASED_RESOURCES records made by this procedure * @throws Exception */ - private void runCallableStatement(String callStr) throws Exception { + private void runCallableStatement(String callStr, long erasedResourceGroupId) throws Exception { try (CallableStatement call = getConnection().prepareCall(String.format(callStr, getSchemaName()))){ call.setString(1, eraseDto.getResourceType()); call.setString(2, eraseDto.getLogicalId()); - call.registerOutParameter(3, Types.BIGINT); + call.setLong(3, erasedResourceGroupId); + call.registerOutParameter(4, Types.BIGINT); call.execute(); - int deleted = (int) call.getLong(3); + int deleted = (int) call.getLong(4); if (LOG.isLoggable(Level.FINEST)) { LOG.finest("Deleted from [" + eraseDto.getResourceType() + "/" + eraseDto.getLogicalId() + "] deleted [" + deleted + "]"); } @@ -105,10 +117,11 @@ private void runCallableStatement(String callStr) throws Exception { /** * Executes the SQL logic as part of the dao rather than via a stored procedure/function. - * + * + * @param erasedResourceGroupId * @throws SQLException */ - public void runInDao() throws SQLException { + public void runInDao(long erasedResourceGroupId) throws SQLException { String resourceType = eraseDto.getResourceType(); String logicalId = eraseDto.getLogicalId(); @@ -188,6 +201,24 @@ public void runInDao() throws SQLException { // If the version is 1, we need to fall all the way through and treat it // as a whole. if (eraseDto.getVersion() != null && version != 1) { + + // Record the affected record + final String INSERT_ERASED_RESOURCES = translator.getType() == DbType.DB2 + ? 
"INSERT INTO erased_resources(mt_id, erased_resource_group_id, resource_type_id, logical_id, version_id) " + + " VALUES (" + adminSchemaName + ".SV_TENANT_ID, ?, ?, ?, ?)" + : "INSERT INTO erased_resources(erased_resource_group_id, resource_type_id, logical_id, version_id) " + + " VALUES (?, ?, ?, ?)"; + try (PreparedStatement stmt = getConnection().prepareStatement(INSERT_ERASED_RESOURCES)) { + stmt.setLong(1, erasedResourceGroupId); + stmt.setInt(2, resourceTypeId); + stmt.setString(3, logicalId); + stmt.setInt(4, eraseDto.getVersion()); + stmt.executeUpdate(); + } catch (SQLException x) { + LOG.log(Level.SEVERE, INSERT_ERASED_RESOURCES, x); + throw translator.translate(x); + } + // Update the specific version's PAYLOAD by updating the resource final String UPDATE_RESOURCE_PAYLOAD = "UPDATE " + resourceType + "_RESOURCES" + @@ -226,6 +257,22 @@ public void runInDao() throws SQLException { return; } + // The entire logical resource is being erased, so don't include a version when we record this + final String INSERT_ERASED_RESOURCES = translator.getType() == DbType.DB2 + ? 
"INSERT INTO erased_resources(mt_id, erased_resource_group_id, resource_type_id, logical_id) " + + " VALUES (" + adminSchemaName + ".SV_TENANT_ID, ?, ?, ?)" + : "INSERT INTO erased_resources(erased_resource_group_id, resource_type_id, logical_id) " + + " VALUES (?, ?, ?)"; + try (PreparedStatement stmt = getConnection().prepareStatement(INSERT_ERASED_RESOURCES)) { + stmt.setLong(1, erasedResourceGroupId); + stmt.setInt(2, resourceTypeId); + stmt.setString(3, logicalId); + stmt.executeUpdate(); + } catch (SQLException x) { + LOG.log(Level.SEVERE, INSERT_ERASED_RESOURCES, x); + throw translator.translate(x); + } + // Step 2: Delete from resource_change_log final String RCL_DELETE = "DELETE FROM RESOURCE_CHANGE_LOG" @@ -309,17 +356,79 @@ public void deleteFromAllParametersTables(String tablePrefix, long logicalResour * @param eraseDto the input * @throws Exception */ - public void erase(ResourceEraseRecord eraseRecord, EraseDTO eraseDto) throws Exception { + public long erase(ResourceEraseRecord eraseRecord, EraseDTO eraseDto) throws Exception { this.eraseRecord = eraseRecord; this.eraseDto = eraseDto; + + // Assign the ERASE_RESOURCE_GROUP_ID which is used to record all the + // logical_resource and resource_versions erased here + FhirSequenceDAO fhirSequence = FHIRResourceDAOFactory.getSequenceDAO(getConnection(), getFlavor()); + long erasedResourceGroupId = fhirSequence.nextValue(); if (DbType.DB2.equals(getFlavor().getType()) && eraseDto.getVersion() == null) { - runCallableStatement(CALL_DB2); + runCallableStatement(CALL_DB2, erasedResourceGroupId); } else if (DbType.POSTGRESQL.equals(getFlavor().getType()) && eraseDto.getVersion() == null) { - runCallableStatement(CALL_POSTGRES); + runCallableStatement(CALL_POSTGRES, erasedResourceGroupId); } else { // Uses the Native Java to execute a Resource Erase - runInDao(); + runInDao(erasedResourceGroupId); + } + + // So we know which records have been erased + return erasedResourceGroupId; + } + + /** + * Fetch all the 
ERASED_RESOURCE records associated with the given erasedResourceGroupId + * @param erasedResourceGroupId + * @return + */ + public List getErasedResourceRecords(long erasedResourceGroupId) { + List result = new ArrayList<>(); + + final String SELECT_RECORDS = + "SELECT erased_resource_id, resource_type_id, logical_id, version_id " + + " FROM erased_resources " + + " WHERE erased_resource_group_id = ?"; + + try (PreparedStatement stmt = getConnection().prepareStatement(SELECT_RECORDS)) { + stmt.setLong(1, erasedResourceGroupId); + ResultSet rs = stmt.executeQuery(); + while (rs.next()) { + long erasedResourceId = rs.getLong(1); + int resourceTypeId = rs.getInt(2); + String logicalId = rs.getString(3); + Integer versionId = rs.getInt(4); + if (rs.wasNull()) { + versionId = null; + } + + ErasedResourceRec rec = new ErasedResourceRec(erasedResourceId, resourceTypeId, logicalId, versionId); + result.add(rec); + } + } catch (SQLException x) { + LOG.log(Level.SEVERE, SELECT_RECORDS, x); + throw translator.translate(x); + } + + return result; + } + + /** + * Delete all the ERASED_RESOURCE records belonging to the given erasedResourceGroupId + * @param erasedResourceGroupId + */ + public void clearErasedResourcesInGroup(long erasedResourceGroupId) { + final String DEL = + "DELETE FROM erased_resources " + + " WHERE erased_resource_group_id = ?"; + + try (PreparedStatement stmt = getConnection().prepareStatement(DEL)) { + stmt.setLong(1, erasedResourceGroupId); + stmt.executeUpdate(); + } catch (SQLException x) { + LOG.log(Level.SEVERE, DEL, x); + throw translator.translate(x); } } -} +} \ No newline at end of file diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/FHIRDbDAOImpl.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/FHIRDbDAOImpl.java index 2a42f51678e..faa10d7ad16 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/FHIRDbDAOImpl.java +++ 
b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/FHIRDbDAOImpl.java @@ -26,7 +26,6 @@ import com.ibm.fhir.model.type.code.IssueType; import com.ibm.fhir.model.util.FHIRUtil; import com.ibm.fhir.persistence.exception.FHIRPersistenceException; -import com.ibm.fhir.persistence.jdbc.JDBCConstants; import com.ibm.fhir.persistence.jdbc.connection.FHIRDbFlavor; import com.ibm.fhir.persistence.jdbc.dao.api.FHIRDbDAO; import com.ibm.fhir.persistence.jdbc.dto.Resource; @@ -405,8 +404,12 @@ protected List createDTOs(ResultSet resultSet) throws FHIRPersistenceD List dtoList = new ArrayList(); try { + // Some queries include the resource type id, which we need to know + // when fetching offloaded payloads. + boolean hasResourceTypeId = resultSet.getMetaData().getColumnCount() == 9; + while (resultSet.next()) { - dto = this.createDTO(resultSet); + dto = this.createDTO(resultSet, hasResourceTypeId); if (dto != null) { dtoList.add(dto); } @@ -428,7 +431,7 @@ protected List createDTOs(ResultSet resultSet) throws FHIRPersistenceD * @return T - An instance of type T, which is a FHIR Data Transfer Object. * @throws FHIRPersistenceDataAccessException */ - protected Resource createDTO(ResultSet resultSet) throws FHIRPersistenceDataAccessException { + protected Resource createDTO(ResultSet resultSet, boolean hasResourceTypeId) throws FHIRPersistenceDataAccessException { // Can be overridden by subclasses that need to return DTOs. 
return null; } diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceDAOImpl.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceDAOImpl.java index 867da840ecb..62b05bd4864 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceDAOImpl.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dao/impl/ResourceDAOImpl.java @@ -77,28 +77,30 @@ public class ResourceDAOImpl extends FHIRDbDAOImpl implements ResourceDAO { public static final int IDX_IS_DELETED = 5; public static final int IDX_DATA = 6; public static final int IDX_LOGICAL_ID = 7; + public static final int IDX_RESOURCE_PAYLOAD_KEY = 8; + public static final int IDX_RESOURCE_TYPE_ID = 9; // Read the current version of the resource (even if the resource has been deleted) - private static final String SQL_READ = "SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID " + + private static final String SQL_READ = "SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID, R.RESOURCE_PAYLOAD_KEY " + "FROM %s_RESOURCES R, %s_LOGICAL_RESOURCES LR WHERE " + "LR.LOGICAL_ID = ? AND R.RESOURCE_ID = LR.CURRENT_RESOURCE_ID"; // Read a specific version of the resource private static final String SQL_VERSION_READ = - "SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID " + + "SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID, R.RESOURCE_PAYLOAD_KEY " + "FROM %s_RESOURCES R, %s_LOGICAL_RESOURCES LR WHERE " + "LR.LOGICAL_ID = ? 
AND R.LOGICAL_RESOURCE_ID = LR.LOGICAL_RESOURCE_ID AND R.VERSION_ID = ?"; // @formatter:off // 0 1 - // 1 2 3 4 5 6 7 8 9 0 1 2 3 + // 1 2 3 4 5 6 7 8 9 0 1 2 3 4 // @formatter:on // Don't forget that we must account for IN and OUT parameters. - private static final String SQL_INSERT_WITH_PARAMETERS = "CALL %s.add_any_resource(?,?,?,?,?,?,?,?,?,?,?,?,?)"; + private static final String SQL_INSERT_WITH_PARAMETERS = "CALL %s.add_any_resource(?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; // Read version history of the resource identified by its logical-id private static final String SQL_HISTORY = - "SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID " + + "SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID, R.RESOURCE_PAYLOAD_KEY " + "FROM %s_RESOURCES R, %s_LOGICAL_RESOURCES LR WHERE " + "LR.LOGICAL_ID = ? AND R.LOGICAL_RESOURCE_ID = LR.LOGICAL_RESOURCE_ID " + "ORDER BY R.VERSION_ID DESC "; @@ -108,7 +110,7 @@ public class ResourceDAOImpl extends FHIRDbDAOImpl implements ResourceDAO { "R.LOGICAL_RESOURCE_ID = LR.LOGICAL_RESOURCE_ID"; private static final String SQL_HISTORY_FROM_DATETIME = - "SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID " + + "SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID, R.RESOURCE_PAYLOAD_KEY " + "FROM %s_RESOURCES R, %s_LOGICAL_RESOURCES LR WHERE " + "LR.LOGICAL_ID = ? AND R.LAST_UPDATED >= ? 
AND R.LOGICAL_RESOURCE_ID = LR.LOGICAL_RESOURCE_ID " + "ORDER BY R.VERSION_ID DESC "; @@ -122,7 +124,7 @@ public class ResourceDAOImpl extends FHIRDbDAOImpl implements ResourceDAO { private static final String SQL_READ_RESOURCE_TYPE = "CALL %s.add_resource_type(?, ?)"; private static final String SQL_SEARCH_BY_IDS = - "SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID " + + "SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID, R.RESOURCE_PAYLOAD_KEY " + "FROM %s_RESOURCES R, %s_LOGICAL_RESOURCES LR WHERE R.LOGICAL_RESOURCE_ID = LR.LOGICAL_RESOURCE_ID AND " + "R.RESOURCE_ID IN "; @@ -245,11 +247,13 @@ public Resource versionRead(String logicalId, String resourceType, int versionId * * @param resultSet * A ResultSet containing FHIR persistent object data. + * @param hasResourceTypeId + * True if the ResultSet includes the RESOURCE_TYPE_ID column * @return Resource - A Resource DTO * @throws FHIRPersistenceDataAccessException */ @Override - protected Resource createDTO(ResultSet resultSet) throws FHIRPersistenceDataAccessException { + protected Resource createDTO(ResultSet resultSet, boolean hasResourceTypeId) throws FHIRPersistenceDataAccessException { final String METHODNAME = "createDTO"; log.entering(CLASSNAME, METHODNAME); @@ -266,6 +270,11 @@ protected Resource createDTO(ResultSet resultSet) throws FHIRPersistenceDataAcce resource.setLogicalId(resultSet.getString(IDX_LOGICAL_ID)); resource.setVersionId(resultSet.getInt(IDX_VERSION_ID)); resource.setDeleted(resultSet.getString(IDX_IS_DELETED).equals("Y") ? 
true : false); + resource.setResourcePayloadKey(resultSet.getString(IDX_RESOURCE_PAYLOAD_KEY)); + + if (hasResourceTypeId) { + resource.setResourceTypeId(resultSet.getInt(IDX_RESOURCE_TYPE_ID)); + } } catch (Throwable e) { FHIRPersistenceDataAccessException fx = new FHIRPersistenceDataAccessException("Failure creating Resource DTO."); throw severe(log, fx, e); @@ -524,14 +533,20 @@ public Resource insert(Resource resource, List paramete stmt.setString(1, resource.getResourceType()); stmt.setString(2, resource.getLogicalId()); - // Check for large objects, and branch around it. - boolean large = FhirSchemaConstants.STORED_PROCEDURE_SIZE_LIMIT < resource.getDataStream().size(); - if (large) { - // Outside of the normal flow we have a BIG JSON or XML - stmt.setNull(3, Types.BLOB); + boolean large = false; + if (resource.getDataStream() != null) { + // Check for large objects, and branch around it. + large = FhirSchemaConstants.STORED_PROCEDURE_SIZE_LIMIT < resource.getDataStream().size(); + if (large) { + // Outside of the normal flow we have a BIG JSON or XML + stmt.setNull(3, Types.BLOB); + } else { + // Normal Flow, we set the data + stmt.setBinaryStream(3, resource.getDataStream().inputStream()); + } } else { - // Normal Flow, we set the data - stmt.setBinaryStream(3, resource.getDataStream().inputStream()); + // payload offloaded to another data store + stmt.setNull(3, Types.BLOB); } lastUpdated = resource.getLastUpdated(); @@ -540,24 +555,25 @@ public Resource insert(Resource resource, List paramete stmt.setInt(6, resource.getVersionId()); stmt.setString(7, parameterHashB64); setInt(stmt, 8, ifNoneMatch); - stmt.registerOutParameter(9, Types.BIGINT); // logical_resource_id - stmt.registerOutParameter(10, Types.BIGINT); // resource_id - stmt.registerOutParameter(11, Types.VARCHAR); // current_hash - stmt.registerOutParameter(12, Types.INTEGER); // o_interaction_status - stmt.registerOutParameter(13, Types.INTEGER); // o_if_none_match_version + setString(stmt, 9, 
resource.getResourcePayloadKey()); + stmt.registerOutParameter(10, Types.BIGINT); // logical_resource_id + stmt.registerOutParameter(11, Types.BIGINT); // resource_id + stmt.registerOutParameter(12, Types.VARCHAR); // current_hash + stmt.registerOutParameter(13, Types.INTEGER); // o_interaction_status + stmt.registerOutParameter(14, Types.INTEGER); // o_if_none_match_version stmt.execute(); long latestTime = System.nanoTime(); double dbCallDuration = (latestTime-dbCallStartTime)/1e6; - resource.setId(stmt.getLong(9)); - final long versionedResourceRowId = stmt.getLong(10); - final String currentHash = stmt.getString(11); - final int interactionStatus = stmt.getInt(12); + resource.setId(stmt.getLong(10)); + final long versionedResourceRowId = stmt.getLong(11); + final String currentHash = stmt.getString(12); + final int interactionStatus = stmt.getInt(13); if (interactionStatus == 1) { // No update, so no need to make any more changes resource.setInteractionStatus(InteractionStatus.IF_NONE_MATCH_EXISTED); - resource.setIfNoneMatchVersion(stmt.getInt(13)); + resource.setIfNoneMatchVersion(stmt.getInt(14)); } else { resource.setInteractionStatus(InteractionStatus.MODIFIED); @@ -836,4 +852,19 @@ protected void setInt(PreparedStatement ps, int index, Integer value) throws SQL ps.setInt(index, value); } } + + /** + * Set a String parameter in the statement, handling null as required + * @param ps + * @param index + * @param value + * @throws SQLException + */ + protected void setString(PreparedStatement ps, int index, String value) throws SQLException { + if (value == null) { + ps.setNull(index, Types.VARCHAR); + } else { + ps.setString(index, value); + } + } } \ No newline at end of file diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceDAO.java index ade4fc517c9..eda3bb1e660 100644 --- 
a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceDAO.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/derby/DerbyResourceDAO.java @@ -13,6 +13,7 @@ import java.sql.SQLException; import java.sql.SQLIntegrityConstraintViolationException; import java.sql.Timestamp; +import java.sql.Types; import java.util.Calendar; import java.util.List; import java.util.UUID; @@ -121,7 +122,7 @@ public Resource insert(Resource resource, List paramet long resourceId = this.storeResource(resource.getResourceType(), parameters, resource.getLogicalId(), - resource.getDataStream().inputStream(), + resource.getDataStream() != null ? resource.getDataStream().inputStream() : null, lastUpdated, resource.isDeleted(), sourceKey, @@ -130,6 +131,7 @@ public Resource insert(Resource resource, List paramet connection, parameterDao, ifNoneMatch, + resource.getResourcePayloadKey(), outInteractionStatus, outIfNoneMatchVersion ); @@ -212,7 +214,7 @@ public Resource insert(Resource resource, List paramet public long storeResource(String tablePrefix, List parameters, String p_logical_id, InputStream p_payload, Timestamp p_last_updated, boolean p_is_deleted, String p_source_key, Integer p_version, String p_parameterHashB64, Connection conn, - ParameterDAO parameterDao, Integer ifNoneMatch, + ParameterDAO parameterDao, Integer ifNoneMatch, String resourcePayloadKey, AtomicInteger outInteractionStatus, AtomicInteger outIfNoneMatchVersion) throws Exception { final Calendar UTC = CalendarHelper.getCalendarForUTC(); @@ -445,16 +447,23 @@ public long storeResource(String tablePrefix, List para if (logger.isLoggable(Level.FINEST)) { logger.finest("Creating " + tablePrefix + "_resources row: " + v_resource_type + "/" + p_logical_id); } - String sql3 = "INSERT INTO " + tablePrefix + "_resources (resource_id, logical_resource_id, version_id, data, last_updated, is_deleted) " - + "VALUES (?,?,?,?,?,?)"; + String sql3 = "INSERT INTO " + tablePrefix + 
"_resources (resource_id, logical_resource_id, version_id, data, last_updated, is_deleted, resource_payload_key) " + + "VALUES (?,?,?,?,?,?,?)"; try (PreparedStatement stmt = conn.prepareStatement(sql3)) { // bind parameters stmt.setLong(1, v_resource_id); stmt.setLong(2, v_logical_resource_id); stmt.setInt(3, p_version); - stmt.setBinaryStream(4, p_payload); + + if (p_payload != null) { + stmt.setBinaryStream(4, p_payload); + } else { + // payload offloaded to another data store + stmt.setNull(4, Types.BLOB); + } stmt.setTimestamp(5, p_last_updated, UTC); stmt.setString(6, p_is_deleted ? "Y" : "N"); + setString(stmt, 7, resourcePayloadKey); // can be null stmt.executeUpdate(); } diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchDataQuery.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchDataQuery.java index 59eac64f512..c4729f9ff41 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchDataQuery.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchDataQuery.java @@ -14,12 +14,14 @@ * this class fetches the data and handles pagination */ public class SearchDataQuery extends SearchQuery { - // Is sorting required? boolean addSorting = true; // Is pagination required? 
boolean addPagination = true; + + int resourceTypeId = -1; + /** * Public constructor @@ -35,15 +37,16 @@ public SearchDataQuery(String resourceType) { * @param addSorting * @param addPagination */ - public SearchDataQuery(String resourceType, boolean addSorting, boolean addPagination) { + public SearchDataQuery(String resourceType, boolean addSorting, boolean addPagination, int resourceTypeId) { super(resourceType); + this.resourceTypeId = resourceTypeId; this.addSorting = addSorting; this.addPagination = addPagination; } @Override public T getRoot(SearchQueryVisitor visitor) { - return visitor.dataRoot(getRootResourceType()); + return visitor.dataRoot(getRootResourceType(), resourceTypeId); } @Override @@ -53,7 +56,8 @@ public T visit(SearchQueryVisitor visitor) throws FHIRPersistenceExceptio T query = super.visit(visitor); // Join the core logical resource selection to the resource versions table - query = visitor.joinResources(query); + final boolean includeResourceTypeId = this.resourceTypeId >= 0; + query = visitor.joinResources(query, includeResourceTypeId); // now attach the requisite ordering and pagination clauses if (addSorting) { diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQuery.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQuery.java index 7ac525b3dd5..18fc5ee49e5 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQuery.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQuery.java @@ -21,6 +21,10 @@ public abstract class SearchQuery { private static final String CLASSNAME = SearchQuery.class.getName(); private static final Logger logger = Logger.getLogger(CLASSNAME); + + // constant to improve readability of method calls + public static final boolean INCLUDE_RESOURCE_TYPE_ID = true; + private final String rootResourceType; private final List searchParams = new ArrayList<>(); diff --git 
a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQueryRenderer.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQueryRenderer.java index 98cd75adfa5..84486a391d8 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQueryRenderer.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQueryRenderer.java @@ -167,6 +167,18 @@ protected String resourceLogicalResources(String resourceType) { return resourceType + _LOGICAL_RESOURCES; } } + + protected String resourceTypeField(String resourceType, int resourceTypeId) { + // If the query is a whole-system-search running at the global + // logical_resources level, we can get the resource type directly + // from the logical_resources table + if (isWholeSystemSearch(resourceType)) { + return "LR.RESOURCE_TYPE_ID"; + } else { + // Use a literal value for the resource_type_id value + return Integer.toString(resourceTypeId); + } + } /** * Get the table name for the xx_resources table where xx is the resource type name @@ -267,13 +279,13 @@ AND EXISTS ( } @Override - public QueryData dataRoot(String rootResourceType) { + public QueryData dataRoot(String rootResourceType, int resourceTypeId) { /* // The data root query is formed as an inner select statement which we // then inner join to the xx_RESOURCES table as a final step. This is // crucial to enable the optimizer to generate the correct plan. 
// The final query looks something like this: - SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID + SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID, R.RESOURCE_PAYLOAD_KEY FROM ( SELECT LR0.LOGICAL_RESOURCE_ID, LR0.LOGICAL_ID, LR0.CURRENT_RESOURCE_ID FROM Patient_LOGICAL_RESOURCES AS LR0 @@ -297,6 +309,11 @@ AND EXISTS ( // parameters are bolted on as exists statements in the WHERE clause. The final // query is constructed when joinResources is called. SelectAdapter select = Select.select("LR0.LOGICAL_RESOURCE_ID", "LR0.LOGICAL_ID", "LR0.CURRENT_RESOURCE_ID"); + if (resourceTypeId >= 0) { + // needed for whole system search where the resource type is required + // in order to process the resource payload (which may be offloaded) + select.addColumn(Integer.toString(resourceTypeId), alias("RESOURCE_TYPE_ID")); + } select.from(xxLogicalResources, alias(lrAliasName)) .where(lrAliasName, IS_DELETED).eq().literal("N"); return new QueryData(select, lrAliasName, null, rootResourceType, 0); @@ -330,11 +347,18 @@ public QueryData getParameterBaseQuery(QueryData parent) { } @Override - public QueryData joinResources(QueryData queryData) { + public QueryData joinResources(QueryData queryData, boolean includeResourceTypeId) { final SelectAdapter logicalResources = queryData.getQuery(); final String xxResources = resourceResources(queryData.getResourceType()); final String lrAliasName = "LR"; - SelectAdapter select = Select.select("R.RESOURCE_ID", "R.LOGICAL_RESOURCE_ID", "R.VERSION_ID", "R.LAST_UPDATED", "R.IS_DELETED", "R.DATA", "LR.LOGICAL_ID"); + SelectAdapter select = Select.select("R.RESOURCE_ID", "R.LOGICAL_RESOURCE_ID", "R.VERSION_ID", "R.LAST_UPDATED", + "R.IS_DELETED", "R.DATA", "LR.LOGICAL_ID", "R.RESOURCE_PAYLOAD_KEY"); + + // Resource type id is used for whole-system-search cases where the query + // can return resources of different types 
(e.g. both Patient and Observation) + if (includeResourceTypeId) { + select.addColumn(lrAliasName, "RESOURCE_TYPE_ID", alias("RESOURCE_TYPE_ID")); + } select.from(logicalResources.build(), alias(lrAliasName)) .innerJoin(xxResources, alias("R"), on(lrAliasName, "CURRENT_RESOURCE_ID").eq("R", "RESOURCE_ID")); @@ -376,7 +400,8 @@ public QueryData wrapInclude(QueryData query) { final String lrAlias = "LR"; final String rAlias = "R"; final String rTable = query.getResourceType() + "_RESOURCES"; - SelectAdapter select = Select.select("LR.RESOURCE_ID", "LR.LOGICAL_RESOURCE_ID", "LR.VERSION_ID", "LR.LAST_UPDATED", "LR.IS_DELETED", "R.DATA", "LR.LOGICAL_ID"); + SelectAdapter select = Select.select("LR.RESOURCE_ID", "LR.LOGICAL_RESOURCE_ID", "LR.VERSION_ID", + "LR.LAST_UPDATED", "LR.IS_DELETED", "R.DATA", "LR.LOGICAL_ID", "R.RESOURCE_PAYLOAD_KEY"); select.from(query.getQuery().build(), alias(lrAlias)) .innerJoin(rTable, alias(rAlias), on(lrAlias, "RESOURCE_ID").eq(rAlias, "RESOURCE_ID")); return new QueryData(select, lrAlias, null, query.getResourceType(), 0); @@ -428,9 +453,10 @@ AND EXISTS ( } @Override - public QueryData wholeSystemDataRoot(String rootResourceType) { - /* Final query should look something like this: - SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID + public QueryData wholeSystemDataRoot(String rootResourceType, int rootResourceTypeId) { + /* Final query should look something like this (where [RTFIELD] depends on the type of the + * whole system search): + SELECT R.RESOURCE_ID, R.LOGICAL_RESOURCE_ID, R.VERSION_ID, R.LAST_UPDATED, R.IS_DELETED, R.DATA, LR.LOGICAL_ID, R.RESOURCE_PAYLOAD_KEY, [RTFIELD] AS RESOURCE_TYPE_ID FROM ( SELECT LR.LOGICAL_RESOURCE_ID, LR.LOGICAL_ID, LR.CURRENT_RESOURCE_ID FROM Patient_LOGICAL_RESOURCES AS LR @@ -441,11 +467,13 @@ AND LR.LOGICAL_RESOURCE_ID IN (2,4,6,10,12,14,20,24,26,29)) AS LR FETCH FIRST 10 ROWS ONLY */ final String xxLogicalResources = 
resourceLogicalResources(rootResourceType); + final String resourceTypeIdStr = Integer.toString(rootResourceTypeId); final String lrAliasName = "LR"; // The core data query joining together the logical resources table. The final // query is constructed when joinResources is called. SelectAdapter select = Select.select("LR.LOGICAL_RESOURCE_ID", "LR.LOGICAL_ID", "LR.CURRENT_RESOURCE_ID"); + select.addColumn(resourceTypeIdStr, alias("RESOURCE_TYPE_ID")); select.from(xxLogicalResources, alias(lrAliasName)) .where(lrAliasName, IS_DELETED).eq().literal("N"); return new QueryData(select, lrAliasName, null, rootResourceType, 0); @@ -468,7 +496,7 @@ SELECT SUM(CNT) ) AS COMBINED RESULTS Final query should look something like this for a data query: - SELECT RESOURCE_ID, LOGICAL_RESOURCE_ID, VERSION_ID, LAST_UPDATED, IS_DELETED, DATA, LOGICAL_ID + SELECT RESOURCE_ID, LOGICAL_RESOURCE_ID, VERSION_ID, LAST_UPDATED, IS_DELETED, DATA, LOGICAL_ID, RESOURCE_PAYLOAD_KEY, RESOURCE_TYPE_ID FROM ( UNION ALL @@ -485,7 +513,7 @@ SELECT SUM(CNT) if (isCountQuery) { select = Select.select("SUM(CNT)"); } else { - select = Select.select("RESOURCE_ID", "LOGICAL_RESOURCE_ID", "VERSION_ID", "LAST_UPDATED", "IS_DELETED", "DATA", "LOGICAL_ID"); + select = Select.select("RESOURCE_ID", "LOGICAL_RESOURCE_ID", "VERSION_ID", "LAST_UPDATED", "IS_DELETED", "DATA", "LOGICAL_ID", "RESOURCE_PAYLOAD_KEY", "RESOURCE_TYPE_ID"); } SelectAdapter first = null; SelectAdapter previous = null; @@ -1496,7 +1524,7 @@ public QueryData addIncludeFilter(QueryData queryData, InclusionParameter inclus // > If a resource has a reference that is versioned and _include is performed, // > the specified version SHOULD be provided. 
/* -SELECT R0.RESOURCE_ID, R0.LOGICAL_RESOURCE_ID, R0.VERSION_ID, R0.LAST_UPDATED, R0.IS_DELETED, R0.DATA, LR0.LOGICAL_ID +SELECT R0.RESOURCE_ID, R0.LOGICAL_RESOURCE_ID, R0.VERSION_ID, R0.LAST_UPDATED, R0.IS_DELETED, R0.DATA, R0.RESOURCE_PAYLOAD_KEY, LR0.LOGICAL_ID FROM fhirdata.ExplanationOfBenefit_TOKEN_VALUES_V AS P1 INNER JOIN fhirdata.Claim_LOGICAL_RESOURCES AS LR0 ON LR0.LOGICAL_ID = P1.TOKEN_VALUE diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQueryVisitor.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQueryVisitor.java index c47bbc64953..add209b72b9 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQueryVisitor.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchQueryVisitor.java @@ -30,17 +30,19 @@ public interface SearchQueryVisitor { /** * The root query (select statement) for the data query * @param rootResourceType + * @param resourceTypeId * @return */ - T dataRoot(String rootResourceType); + T dataRoot(String rootResourceType, int resourceTypeId); /** * Finish the data query by wrapping the root and joining the resources * table * @param queryData + * @param includeResourceTypeId include the resource_type_id in the select column list * @return */ - T joinResources(T queryData); + T joinResources(T queryData, boolean includeResourceTypeId); /** * Get the join to which we want to attach all the parameter tables. 
@@ -85,9 +87,10 @@ public interface SearchQueryVisitor { /** * The root of the FHIR whole-system data search query * @param rootResourceType + * @param rootResourceTypeId * @return */ - T wholeSystemDataRoot(String rootResourceType); + T wholeSystemDataRoot(String rootResourceType, int rootResourceTypeId); /** * The wrapper for whole-system search diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchWholeSystemDataQuery.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchWholeSystemDataQuery.java index fda89c66c07..f5d0dde9e3c 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchWholeSystemDataQuery.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/domain/SearchWholeSystemDataQuery.java @@ -14,18 +14,22 @@ * this class fetches the resource data for whole system searches. */ public class SearchWholeSystemDataQuery extends SearchQuery { + + // The database resource_type_id matching the resourceType of this sub-query + final int resourceTypeId; /** * Public constructor * @param resourceType */ - public SearchWholeSystemDataQuery(String resourceType) { + public SearchWholeSystemDataQuery(String resourceType, int resourceTypeId) { super(resourceType); + this.resourceTypeId = resourceTypeId; } @Override public T getRoot(SearchQueryVisitor visitor) { - return visitor.wholeSystemDataRoot(getRootResourceType()); + return visitor.wholeSystemDataRoot(getRootResourceType(), this.resourceTypeId); } @Override @@ -41,7 +45,7 @@ public T visit(SearchQueryVisitor visitor) throws FHIRPersistenceExceptio visitExtensions(query, visitor); // Join the core logical resource selection to the resource versions table - query = visitor.joinResources(query); + query = visitor.joinResources(query, INCLUDE_RESOURCE_TYPE_ID); return query; } diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dto/ErasedResourceRec.java 
b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dto/ErasedResourceRec.java new file mode 100644 index 00000000000..3749582631d --- /dev/null +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dto/ErasedResourceRec.java @@ -0,0 +1,64 @@ +/* + * (C) Copyright IBM Corp. 2021 + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package com.ibm.fhir.persistence.jdbc.dto; + + +/** + * A DTO representing an erased resource from ERASED_RESOURCES + */ +public class ErasedResourceRec { + private final long erasedResourceId; + private final int resourceTypeId; + private final String logicalId; + private final Integer versionId; + + /** + * Public constructor + * @param erasedResourceId + * @param resourceTypeId + * @param logicalId + * @param versionId + */ + public ErasedResourceRec(long erasedResourceId, int resourceTypeId, String logicalId, Integer versionId) { + this.erasedResourceId = erasedResourceId; + this.resourceTypeId = resourceTypeId; + this.logicalId = logicalId; + this.versionId = versionId; + } + + + /** + * @return the erasedResourceId + */ + public long getErasedResourceId() { + return erasedResourceId; + } + + + /** + * @return the resourceTypeId + */ + public int getResourceTypeId() { + return resourceTypeId; + } + + + /** + * @return the logicalId + */ + public String getLogicalId() { + return logicalId; + } + + + /** + * @return the versionId + */ + public Integer getVersionId() { + return versionId; + } +} \ No newline at end of file diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dto/Resource.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dto/Resource.java index 7bd2b6864a2..7cbd092be82 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dto/Resource.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/dto/Resource.java @@ -43,6 +43,11 @@ public class Resource { */ private String resourceType; + /** + * The 
resource type id set when reading resources from the database + */ + private int resourceTypeId = -1; + /** * This is the _RESOURCES.LAST_UPDATED column */ @@ -67,6 +72,11 @@ public class Resource { * The version of the resource found if we hit IfNoneMatch */ private Integer ifNoneMatchVersion; + + /** + * A unique key (UUID value) used to tie the RDBMS record with the offloaded payload + */ + private String resourcePayloadKey; public Resource() { super(); @@ -178,5 +188,33 @@ public InteractionStatus getInteractionStatus() { public void setInteractionStatus(InteractionStatus interactionStatus) { this.interactionStatus = interactionStatus; } + + /** + * @return the resourceTypeId + */ + public int getResourceTypeId() { + return resourceTypeId; + } + + /** + * @param resourceTypeId the resourceTypeId to set + */ + public void setResourceTypeId(int resourceTypeId) { + this.resourceTypeId = resourceTypeId; + } + + /** + * @return the resourcePayloadKey + */ + public String getResourcePayloadKey() { + return resourcePayloadKey; + } + + /** + * @param resourcePayloadKey the resourcePayloadKey to set + */ + public void setResourcePayloadKey(String resourcePayloadKey) { + this.resourcePayloadKey = resourcePayloadKey; + } } diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/CacheTransactionSync.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/CacheTransactionSync.java index 9e9f9d444bf..cc00ef81b7e 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/CacheTransactionSync.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/CacheTransactionSync.java @@ -33,14 +33,23 @@ public class CacheTransactionSync implements Synchronization { private final String transactionDataKey; + // A callback when we hit a rollback + private final Runnable rolledBackHandler; + /** * Public constructor - * @param cacheImpl + * + * @param txSyncRegistry + * @param cache + * 
@param transactionDataKey + * @param rolledBackHandler */ - public CacheTransactionSync(TransactionSynchronizationRegistry txSyncRegistry, FHIRPersistenceJDBCCache cache, String transactionDataKey) { + public CacheTransactionSync(TransactionSynchronizationRegistry txSyncRegistry, FHIRPersistenceJDBCCache cache, String transactionDataKey, + Runnable rolledBackHandler) { this.txSyncRegistry = txSyncRegistry; this.cache = cache; this.transactionDataKey = transactionDataKey; + this.rolledBackHandler = rolledBackHandler; } @Override @@ -66,6 +75,10 @@ public void afterCompletion(int status) { // probably a rollback, so throw away everything logger.info("Transaction failed - afterCompletion(status = " + status + ")"); cache.transactionRolledBack(); + + if (rolledBackHandler != null) { + rolledBackHandler.run(); + } } } } \ No newline at end of file diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/FHIRPersistenceJDBCImpl.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/FHIRPersistenceJDBCImpl.java index f70c906c508..1719eecf4df 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/FHIRPersistenceJDBCImpl.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/impl/FHIRPersistenceJDBCImpl.java @@ -32,7 +32,6 @@ import java.util.Map.Entry; import java.util.Properties; import java.util.Set; -import java.util.concurrent.Future; import java.util.function.Function; import java.util.logging.Level; import java.util.logging.Logger; @@ -138,6 +137,7 @@ import com.ibm.fhir.persistence.jdbc.dao.impl.TransactionDataImpl; import com.ibm.fhir.persistence.jdbc.dto.CompositeParmVal; import com.ibm.fhir.persistence.jdbc.dto.DateParmVal; +import com.ibm.fhir.persistence.jdbc.dto.ErasedResourceRec; import com.ibm.fhir.persistence.jdbc.dto.ExtractedParameterValue; import com.ibm.fhir.persistence.jdbc.dto.NumberParmVal; import com.ibm.fhir.persistence.jdbc.dto.QuantityParmVal; @@ 
-156,7 +156,7 @@ import com.ibm.fhir.persistence.jdbc.util.ResourceTypesCache; import com.ibm.fhir.persistence.jdbc.util.TimestampPrefixedUUID; import com.ibm.fhir.persistence.payload.FHIRPayloadPersistence; -import com.ibm.fhir.persistence.payload.PayloadKey; +import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse; import com.ibm.fhir.persistence.util.FHIRPersistenceUtil; import com.ibm.fhir.persistence.util.InputOutputByteStream; import com.ibm.fhir.persistence.util.LogicalIdentityProvider; @@ -228,6 +228,9 @@ public class FHIRPersistenceJDBCImpl implements FHIRPersistence, SchemaNameSuppl // Enable use of legacy whole-system search parameters for the search request private final boolean legacyWholeSystemSearchParamsEnabled; + + // A list of payload persistence responses in case we have a rollback to clean up + private final List payloadPersistenceResponses = new ArrayList<>(); /** * Constructor for use when running as web application in WLP. @@ -270,7 +273,7 @@ public FHIRPersistenceJDBCImpl(FHIRPersistenceJDBCCache cache, FHIRPayloadPersis boolean enableReadOnlyReplicas = fhirConfig.getBooleanProperty(FHIRConfiguration.PROPERTY_JDBC_ENABLE_READ_ONLY_REPLICAS, Boolean.FALSE); this.connectionStrategy = new FHIRDbTenantDatasourceConnectionStrategy(trxSynchRegistry, buildActionChain(), enableReadOnlyReplicas); - this.transactionAdapter = new FHIRUserTransactionAdapter(userTransaction, trxSynchRegistry, cache, TXN_DATA_KEY); + this.transactionAdapter = new FHIRUserTransactionAdapter(userTransaction, trxSynchRegistry, cache, TXN_DATA_KEY, () -> handleRollback()); // Use of legacy whole-system search parameters disabled by default this.legacyWholeSystemSearchParamsEnabled = @@ -365,7 +368,8 @@ protected Action buildActionChain() { @Override public SingleResourceResult create(FHIRPersistenceContext context, T resource) throws FHIRPersistenceException { - // This method is provided for API stability. No longer used. 
+ doCachePrefill(); + // This method is provided for API stability. No longer used. Does not support offloading // Generate a new logical resource id final String logicalId = generateResourceId(); @@ -383,6 +387,7 @@ public SingleResourceResult createWithMeta(FHIRPersisten log.entering(CLASSNAME, METHODNAME); try (Connection connection = openConnection()) { + doCachePrefill(connection); // This create() operation is only called by a REST create. If the given resource // contains an id, then for R4 we need to ignore it and replace it with our @@ -399,7 +404,8 @@ public SingleResourceResult createWithMeta(FHIRPersisten // Create the new Resource DTO instance. com.ibm.fhir.persistence.jdbc.dto.Resource resourceDTO = - createResourceDTO(logicalId, newVersionNumber, lastUpdated, updatedResource); + createResourceDTO(logicalId, newVersionNumber, lastUpdated, updatedResource, + getResourcePayloadKeyFromContext(context)); // The DAO objects are now created on-the-fly (not expensive to construct) and // given the connection to use while processing this request @@ -448,19 +454,38 @@ public SingleResourceResult createWithMeta(FHIRPersisten } } + /** + * Prefill the cache if required + * @throws FHIRPersistenceException + */ + private void doCachePrefill() throws FHIRPersistenceException { + if (cache.needToPrefill()) { + try (Connection connection = openConnection()) { + doCachePrefill(connection); + } catch(FHIRPersistenceException e) { + throw e; + } catch(Throwable e) { + FHIRPersistenceException fx = new FHIRPersistenceException("Cache prefill - unexpected error"); + log.log(Level.SEVERE, fx.getMessage(), e); + throw fx; + } + } + } + /** * Creates and returns a data transfer object (DTO) with the contents of the passed arguments. 
- * + * * @param logicalId * @param newVersionNumber * @param lastUpdated * @param updatedResource + * @param resourcePayloadKey * @return * @throws IOException * @throws FHIRGeneratorException */ private com.ibm.fhir.persistence.jdbc.dto.Resource createResourceDTO(String logicalId, int newVersionNumber, - Instant lastUpdated, Resource updatedResource) throws IOException, FHIRGeneratorException { + Instant lastUpdated, Resource updatedResource, String resourcePayloadKey) throws IOException, FHIRGeneratorException { Timestamp timestamp = FHIRUtilities.convertToTimestamp(lastUpdated.getValue()); @@ -469,6 +494,7 @@ private com.ibm.fhir.persistence.jdbc.dto.Resource createResourceDTO(String logi resourceDTO.setVersionId(newVersionNumber); resourceDTO.setLastUpdated(timestamp); resourceDTO.setResourceType(updatedResource.getClass().getSimpleName()); + resourceDTO.setResourcePayloadKey(resourcePayloadKey); // Are storing the payload in our RDBMS, or offloading to another store? if (this.payloadPersistence == null) { @@ -514,10 +540,6 @@ private T copyAndSetResourceMetaFields(T resource, String l private ResourceDAO makeResourceDAO(Connection connection) throws FHIRPersistenceDataAccessException, FHIRPersistenceException, IllegalArgumentException { - // The resourceDAO is made before any database interaction, so this is a great spot - // to prefill the caches if needed - doCachePrefill(connection); - if (this.trxSynchRegistry != null) { String datastoreId = FHIRRequestContext.get().getDataStoreId(); return FHIRResourceDAOFactory.getResourceDAO(connection, FhirSchemaConstants.FHIR_ADMIN, @@ -580,6 +602,7 @@ public SingleResourceResult updateWithMeta(FHIRPersisten log.entering(CLASSNAME, METHODNAME); try (Connection connection = openConnection()) { + doCachePrefill(connection); ResourceDAO resourceDao = makeResourceDAO(connection); ParameterDAO parameterDao = makeParameterDAO(connection); @@ -588,7 +611,9 @@ public SingleResourceResult updateWithMeta(FHIRPersisten // 
Create the new Resource DTO instance. com.ibm.fhir.persistence.jdbc.dto.Resource resourceDTO = - createResourceDTO(resource.getId(), newVersionNumber, resource.getMeta().getLastUpdated(), resource); + createResourceDTO(resource.getId(), newVersionNumber, + resource.getMeta().getLastUpdated(), resource, + getResourcePayloadKeyFromContext(context)); // Persist the Resource DTO. resourceDao.setPersistenceContext(context); @@ -661,6 +686,7 @@ public MultiResourceResult search(FHIRPersistenceContext context, Clas Select query; try (Connection connection = openConnection()) { + doCachePrefill(connection); // For PostgreSQL search queries we need to set some options to ensure better plans connectionStrategy.applySearchOptimizerOptions(connection, SearchUtil.isCompartmentSearch(searchContext)); ResourceDAO resourceDao = makeResourceDAO(connection); @@ -976,9 +1002,18 @@ private List runIncludeQuery(Class lrIds = includeDTOs.stream() .map(r -> Long.toString(r.getLogicalResourceId())).collect(Collectors.toSet()); Map> resultMap = queryResultMap.computeIfAbsent(iterationLevel, k -> new HashMap<>()); - Set resultLogicalResourceIds = resultMap.computeIfAbsent(SearchConstants.INCLUDE.equals(includeType) ? - inclusionParm.getSearchParameterTargetType() : inclusionParm.getJoinResourceType(), k -> new HashSet<>()); + + final String targetResourceType = SearchConstants.INCLUDE.equals(includeType) ? + inclusionParm.getSearchParameterTargetType() : inclusionParm.getJoinResourceType(); + Set resultLogicalResourceIds = resultMap.computeIfAbsent(targetResourceType, k -> new HashSet<>()); resultLogicalResourceIds.addAll(lrIds); + + // Because the resultLogicalResourceIds may contain resources of different types, we need + // to make sure the resourceTypeId is properly marked on each DTO. We could've selected + // that from the database, but we have the info here, so it's easy to inject it and avoid + // pulling another column from the database we don't actually need. 
+ int targetResourceTypeId = getResourceTypeId(targetResourceType); + includeDTOs.forEach(dto -> dto.setResourceTypeId(targetResourceTypeId)); } return includeDTOs; @@ -1026,11 +1061,14 @@ public SingleResourceResult delete(FHIRPersistenceContex final String METHODNAME = "delete"; log.entering(CLASSNAME, METHODNAME); - + // TODO this implementation needs to be updated so that it doesn't + // modify the resource - that now has to be done at the REST layer + // so that we can support payload persistence outside the RDBMS com.ibm.fhir.persistence.jdbc.dto.Resource existingResourceDTO = null; T existingResource = null; try (Connection connection = openConnection()) { + doCachePrefill(connection); ResourceDAO resourceDao = makeResourceDAO(connection); existingResourceDTO = resourceDao.read(logicalId, resourceType.getSimpleName()); @@ -1040,7 +1078,7 @@ public SingleResourceResult delete(FHIRPersistenceContex resourceType.getSimpleName() + "/" + logicalId); } - existingResource = readResource(resourceType, existingResourceDTO, null); + existingResource = convertResourceDTO(existingResourceDTO, resourceType, null); if (existingResourceDTO.isDeleted()) { addWarning(IssueType.DELETED, "Resource of type'" + resourceType.getSimpleName() + @@ -1063,7 +1101,8 @@ public SingleResourceResult delete(FHIRPersistenceContex // Create a new Resource DTO instance to represent the deleted version. com.ibm.fhir.persistence.jdbc.dto.Resource resourceDTO = - createResourceDTO(logicalId, newVersionNumber, lastUpdated, updatedResource); + createResourceDTO(logicalId, newVersionNumber, lastUpdated, updatedResource, + getResourcePayloadKeyFromContext(context)); resourceDTO.setDeleted(true); // Persist the logically deleted Resource DTO. @@ -1100,29 +1139,48 @@ public SingleResourceResult delete(FHIRPersistenceContex } } - /** - * Convert the payload to a resource class from the IBM FHIR Server model. If payloadPersistence has been - * configured, the payload is read from another service. 
If payloadPersistence is null, then it is expected - * that the payload has been stored in the RDBMS. This function hides that difference. - * @param the type of Resource being returned - * @param resourceType the class type of the resource being read - * @param resourceDTO The data transfer object representing information read from the RDBMS. - * @param elements an optional element filter for the resource - * @return - * @throws FHIRException - * @throws IOException - */ - private T readResource(Class resourceType, com.ibm.fhir.persistence.jdbc.dto.Resource resourceDTO, List elements) throws FHIRException, IOException { - T result; - if (this.payloadPersistence != null) { - // The payload needs to be read from the FHIRPayloadPersistence impl - final int resourceTypeId = cache.getResourceTypeCache().getId(resourceType.getSimpleName()); - result = payloadPersistence.readResource(resourceType, resourceTypeId, resourceDTO.getLogicalId(), resourceDTO.getVersionId(), elements); - } else { - // original impl - the resource was read from the RDBMS - result = convertResourceDTO(resourceDTO, resourceType, elements); + @Override + public void deleteWithMeta(FHIRPersistenceContext context, T resource) throws FHIRPersistenceException { + final String METHODNAME = "deleteWithMeta"; + log.entering(CLASSNAME, METHODNAME); + + try (Connection connection = openConnection()) { + doCachePrefill(connection); + ResourceDAO resourceDao = makeResourceDAO(connection); + + // Create a new Resource DTO instance to represent the deleted version. + int newVersionNumber = Integer.parseInt(resource.getMeta().getVersionId().getValue()); + + // Create the new Resource DTO instance. + com.ibm.fhir.persistence.jdbc.dto.Resource resourceDTO = + createResourceDTO(resource.getId(), newVersionNumber, resource.getMeta().getLastUpdated(), resource, + getResourcePayloadKeyFromContext(context)); + resourceDTO.setDeleted(true); + + // Persist the logically deleted Resource DTO. 
+ resourceDao.setPersistenceContext(context); + resourceDao.insert(resourceDTO, null, null, null, IF_NONE_MATCH_NULL); + + if (log.isLoggable(Level.FINE)) { + log.fine("Deleted FHIR Resource '" + resourceDTO.getResourceType() + "/" + resourceDTO.getLogicalId() + "' id=" + resourceDTO.getId() + + ", version=" + resourceDTO.getVersionId()); + } + } + catch(FHIRPersistenceFKVException e) { + log.log(Level.INFO, this.performCacheDiagnostics()); + throw e; + } + catch(FHIRPersistenceException e) { + throw e; + } + catch(Throwable e) { + FHIRPersistenceException fx = new FHIRPersistenceException("Unexpected error while performing a delete operation."); + log.log(Level.SEVERE, fx.getMessage(), e); + throw fx; + } + finally { + log.exiting(CLASSNAME, METHODNAME); } - return result; } /** @@ -1169,6 +1227,7 @@ public SingleResourceResult read(FHIRPersistenceContext } try (Connection connection = openConnection()) { + doCachePrefill(connection); ResourceDAO resourceDao = makeResourceDAO(connection); resourceDTO = resourceDao.read(logicalId, resourceType.getSimpleName()); @@ -1179,7 +1238,7 @@ public SingleResourceResult read(FHIRPersistenceContext } // Fetch the resource payload if needed and convert to a model object - final T resource = readResource(resourceType, resourceDTO, elements); + final T resource = convertResourceDTO(resourceDTO, resourceType, elements); SingleResourceResult result = new SingleResourceResult.Builder() .success(true) @@ -1217,6 +1276,7 @@ public MultiResourceResult history(FHIRPersistenceContex int offset; try (Connection connection = openConnection()) { + doCachePrefill(connection); ResourceDAO resourceDao = makeResourceDAO(connection); historyContext = context.getHistoryContext(); @@ -1379,6 +1439,7 @@ public SingleResourceResult vread(FHIRPersistenceContext } try (Connection connection = openConnection()) { + doCachePrefill(connection); ResourceDAO resourceDao = makeResourceDAO(connection); version = Integer.parseInt(versionId); @@ -1493,7 
+1554,15 @@ protected List convertResourceDTOList(ResourceDAO resourceDao, List convertResourceDTOList(ResourceDAO resourceDao, List List convertResourceDTOList(List resourceDTOList, Class resourceType) throws FHIRException, IOException { - final String METHODNAME = "convertResourceDTO List"; + final String METHODNAME = "convertResourceDTOList"; log.entering(CLASSNAME, METHODNAME); List resources = new ArrayList<>(); @@ -2110,44 +2176,17 @@ protected List convertResourceDTOList(List convertResourceDTOListOld(List resourceDTOList, - Class resourceType) throws FHIRException, IOException { - final String METHODNAME = "convertResourceDTO List"; - log.entering(CLASSNAME, METHODNAME); - - List resources = new ArrayList<>(); - try { - for (com.ibm.fhir.persistence.jdbc.dto.Resource resourceDTO : resourceDTOList) { - resources.add(this.convertResourceDTO(resourceDTO, resourceType, null)); - } - } - finally { + } finally { log.exiting(CLASSNAME, METHODNAME); } return resources; } /** - * Converts the passed Resource Data Transfer Object to a FHIR Resource object. - * @param resourceDTO - A valid Resource DTO + * Converts the passed Resource Data Transfer Object to a FHIR Resource object. The result + * will be null if the resourceDTO passed in is null. + * + * @param resourceDTO - The resource read from the database, or null if the resource doesn't exist * @param resourceType - The FHIR type of resource to be converted. * @param elements - An optional filter for including only specified elements inside a Resource. * @return Resource - A FHIR Resource object representation of the data portion of the passed Resource DTO. @@ -2158,32 +2197,109 @@ private T convertResourceDTO(com.ibm.fhir.persistence.jdbc. 
Class resourceType, List elements) throws FHIRException, IOException { final String METHODNAME = "convertResourceDTO"; log.entering(CLASSNAME, METHODNAME); - T resource = null; - InputStream in = null; - try { - if (resourceDTO != null && resourceDTO.getDataStream() != null) { - FHIRParser parser = FHIRParser.parser(Format.JSON); - parser.setValidating(false); - in = new GZIPInputStream(resourceDTO.getDataStream().inputStream()); - if (elements != null) { - // parse/filter the resource using elements - resource = parser.as(FHIRJsonParser.class).parseAndFilter(in, elements); - if (resourceType.equals(resource.getClass()) && !FHIRUtil.hasTag(resource, SearchConstants.SUBSETTED_TAG)) { - // add a SUBSETTED tag to this resource to indicate that its elements have been filtered - resource = FHIRUtil.addTag(resource, SearchConstants.SUBSETTED_TAG); + T result; + if (resourceDTO != null) { + if (isOffloadingSupported()) { + // The payload needs to be read from the FHIRPayloadPersistence impl. If this is + // a form of whole-system query (search or history), then the resource type needs + // to come from the DTO itself + String rowResourceTypeName = getResourceTypeInfo(resourceDTO); + int resourceTypeId; + if (rowResourceTypeName != null) { + resourceTypeId = getResourceTypeId(rowResourceTypeName); + } else { + rowResourceTypeName = resourceType.getSimpleName(); + resourceTypeId = getResourceTypeId(resourceType); + } + + // If a specific version of a resource has been deleted using $erase, it + // is possible for the result here to be null. 
+ result = payloadPersistence.readResource(resourceType, rowResourceTypeName, resourceTypeId, resourceDTO.getLogicalId(), resourceDTO.getVersionId(), resourceDTO.getResourcePayloadKey(), elements); + } else { + // original impl - the resource, if any, was read from the RDBMS + if (resourceDTO.getDataStream() != null) { + try (InputStream in = new GZIPInputStream(resourceDTO.getDataStream().inputStream())) { + FHIRParser parser = FHIRParser.parser(Format.JSON); + parser.setValidating(false); + if (elements != null) { + // parse/filter the resource using elements + result = parser.as(FHIRJsonParser.class).parseAndFilter(in, elements); + if (resourceType.equals(result.getClass()) && !FHIRUtil.hasTag(result, SearchConstants.SUBSETTED_TAG)) { + // add a SUBSETTED tag to this resource to indicate that its elements have been filtered + result = FHIRUtil.addTag(result, SearchConstants.SUBSETTED_TAG); + } + } else { + result = parser.parse(in); + } } } else { - resource = parser.parse(in); + // Null DATA column means that this resource version was probably removed + // by $erase + result = null; } } - } finally { - if (in != null) { - in.close(); + } else { + // resource doesn't exist + result = null; + } + + log.exiting(CLASSNAME, METHODNAME); + return result; + } + + /** + * Get the resource type name of the resource represented by the + * given resourceDTO. This is only done if the resourceTypeId field in the + * resourceDTO has been set. If not, returns null.
+ * @param resourceDTO + * @throws FHIRPersistenceException if the resourceTypeId is set + * but the value cannot be found in the cache + * @return + */ + private String getResourceTypeInfo(com.ibm.fhir.persistence.jdbc.dto.Resource resourceDTO) + throws FHIRPersistenceException { + final String result; + // resource type name needs to be derived from the resourceTypeId returned by the DB select query + log.fine(() -> "getResourceTypeInfo(" + resourceDTO.getResourceTypeId() + ")"); + int resourceTypeId = resourceDTO.getResourceTypeId(); + if (resourceTypeId >= 0) { + result = cache.getResourceTypeNameCache().getName(resourceTypeId); + if (result == null) { + // the cache is preloaded, so this should never happen + log.severe("No entry found in cache for resourceTypeId = " + resourceTypeId); + throw new FHIRPersistenceException("Resource type not found in cache"); } + } else { + result = null; + } - log.exiting(CLASSNAME, METHODNAME); + return result; + } + + /** + * Get the database resourceTypeId from the cache. + * @param resourceType + * @return + * @throws FHIRPersistenceException if the resource type is not found in the cache. + */ + private int getResourceTypeId(Class resourceType) throws FHIRPersistenceException { + return getResourceTypeId(resourceType.getSimpleName()); + } + + /** + * Get the database resourceTypeId from the cache. + * @param resourceTypeName + * @return + * @throws FHIRPersistenceException if the resource type is not found in the cache. 
+ */ + private int getResourceTypeId(String resourceTypeName) throws FHIRPersistenceException { + final Integer resourceTypeId = cache.getResourceTypeCache().getId(resourceTypeName); + if (resourceTypeId == null) { + // the cache is preloaded, so this should never happen + log.severe("Resource type missing from resource type cache: '" + resourceTypeName + "'"); + throw new FHIRPersistenceException("Resource type id not found in resource type cache"); } - return resource; + return resourceTypeId; } @Override @@ -2280,13 +2396,14 @@ public String getSchemaForRequestContext(Connection connection) throws FHIRPersi public void doCachePrefill(Connection connection) throws FHIRPersistenceException { // Perform the cache prefill just once (for a given tenant). This isn't synchronous, so // there's a chance for other threads to slip in before the prefill completes. Those threads - // just end up having cache-misses for the names they need. + // just end up repeating the prefill - a little extra work one time to avoid unnecessary locking // Note - this is done as the first thing in a transaction so there's no concern about reading // uncommitted values. 
if (cache.needToPrefill()) { ResourceDAO resourceDao = makeResourceDAO(connection); ParameterDAO parameterDao = makeParameterDAO(connection); FHIRPersistenceJDBCCacheUtil.prefill(resourceDao, parameterDao, cache); + cache.clearNeedToPrefill(); } } @@ -2295,6 +2412,11 @@ public boolean isReindexSupported() { return true; } + @Override + public boolean isOffloadingSupported() { + return this.payloadPersistence != null; + } + @Override public int reindex(FHIRPersistenceContext context, OperationOutcome.Builder operationOutcomeResult, java.time.Instant tstamp, List indexIds, String resourceLogicalId) throws FHIRPersistenceException { @@ -2319,6 +2441,7 @@ public int reindex(FHIRPersistenceContext context, OperationOutcome.Builder oper } try (Connection connection = openConnection()) { + doCachePrefill(connection); ResourceDAO resourceDao = makeResourceDAO(connection); ParameterDAO parameterDao = makeParameterDAO(connection); ReindexResourceDAO reindexDAO = FHIRResourceDAOFactory.getReindexResourceDAO(connection, FhirSchemaConstants.FHIR_ADMIN, schemaNameSupplier.getSchemaForRequestContext(connection), connectionStrategy.getFlavor(), this.trxSynchRegistry, this.cache, parameterDao); @@ -2366,7 +2489,7 @@ public int reindex(FHIRPersistenceContext context, OperationOutcome.Builder oper com.ibm.fhir.persistence.jdbc.dto.Resource existingResourceDTO = resourceDao.read(rir.getLogicalId(), rir.getResourceType()); if (existingResourceDTO != null && !existingResourceDTO.isDeleted()) { rir.setDeleted(false); // just to be clear - Class resourceTypeClass = getResourceType(resourceType); + Class resourceTypeClass = getResourceType(rir.getResourceType()); reindexDAO.setPersistenceContext(context); updateParameters(rir, resourceTypeClass, existingResourceDTO, reindexDAO, operationOutcomeResult); @@ -2493,6 +2616,34 @@ private ParameterTransactionDataImpl getTransactionDataForDatasource(String data return result; } + + /** + * Callback from TransactionData when a transaction has been 
rolled back + * Attempts to delete each offloaded payload recorded in the payloadPersistenceResponses field. + */ + private void handleRollback() { + if (payloadPersistenceResponses.size() > 0 && payloadPersistence == null) { + throw new IllegalStateException("handleRollback called but payloadPersistence is not configured"); + } + // try to delete each of the payload objects we've stored + // because the transaction has been rolled back + log.fine("starting rollback handling for PayloadPersistenceResponse data"); + for (PayloadPersistenceResponse ppr: payloadPersistenceResponses) { + try { + log.fine(() -> "tx rollback - deleting payload: " + ppr.toString()); + payloadPersistence.deletePayload(ppr.getResourceTypeName(), ppr.getResourceTypeId(), + ppr.getLogicalId(), ppr.getVersionId(), ppr.getResourcePayloadKey()); + } catch (Exception x) { + // Nothing more we can do other than log the issue. Any rows we can't process + // here (e.g. network outage) will be orphaned. These orphaned rows + // will be removed by the reconciliation process which scans the payload + // persistence repository and looks for missing RDBMS records. + log.log(Level.SEVERE, "rollback failed to delete payload: " + ppr.toString(), x); + } + } + + payloadPersistenceResponses.clear(); + } /** * Factory function to create a new instance of the TransactionData implementation @@ -2534,6 +2685,7 @@ public void persistResourceTokenValueRecords(Collection r public ResourcePayload fetchResourcePayloads(Class resourceType, java.time.Instant fromLastModified, java.time.Instant toLastModified, Function processor) throws FHIRPersistenceException { try (Connection connection = openConnection()) { + doCachePrefill(connection); // translator is required to handle some simple SQL syntax differences.
This is easier // than creating separate DAO implementations for each database type IDatabaseTranslator translator = FHIRResourceDAOFactory.getTranslatorForFlavor(connectionStrategy.getFlavor()); @@ -2603,10 +2755,22 @@ public ResourceEraseRecord erase(EraseDTO eraseDto) throws FHIRPersistenceExcept ResourceEraseRecord eraseRecord = new ResourceEraseRecord(); try (Connection connection = openConnection()) { + doCachePrefill(connection); IDatabaseTranslator translator = FHIRResourceDAOFactory.getTranslatorForFlavor(connectionStrategy.getFlavor()); IResourceReferenceDAO rrd = makeResourceReferenceDAO(connection); - EraseResourceDAO eraseDao = new EraseResourceDAO(connection, translator, schemaNameSupplier.getSchemaForRequestContext(connection), connectionStrategy.getFlavor(), this.cache, rrd); - eraseDao.erase(eraseRecord, eraseDto); + EraseResourceDAO eraseDao = new EraseResourceDAO(connection, FhirSchemaConstants.FHIR_ADMIN, translator, + schemaNameSupplier.getSchemaForRequestContext(connection), + connectionStrategy.getFlavor(), this.cache, rrd); + long eraseResourceGroupId = eraseDao.erase(eraseRecord, eraseDto); + + // If offloading is enabled, we need to remove the corresponding offloaded resource payloads + if (isOffloadingSupported()) { + erasePayloads(eraseDao, eraseResourceGroupId); + } else { + // clean up the erased_resources records because they're no longer needed + eraseDao.clearErasedResourcesInGroup(eraseResourceGroupId); + } + } catch(FHIRPersistenceResourceNotFoundException e) { throw e; } catch(FHIRPersistenceException e) { @@ -2639,6 +2803,42 @@ public ResourceEraseRecord erase(EraseDTO eraseDto) throws FHIRPersistenceExcept return eraseRecord; } + /** + * Delete all the offloaded payload entries which have been identified for deletion + * @param dao + * @param erasedResourceGroupId + */ + private void erasePayloads(EraseResourceDAO dao, long erasedResourceGroupId) throws FHIRPersistenceException { + List recs = 
dao.getErasedResourceRecords(erasedResourceGroupId); + for (ErasedResourceRec rec: recs) { + erasePayload(rec); + } + + // If the above loop completed without throwing an exception, we can safely + // remove all the records in the group. If an exception was thrown (because + // the offload persistence layer was not accessible), don't delete right now + // just in case we want the tx to commit anyway, allowing for async cleanup + // by the reconciliation process + dao.clearErasedResourcesInGroup(erasedResourceGroupId); + } + + /** + * Erase the payload for the resource described by rec + * @param rec + */ + private void erasePayload(ErasedResourceRec rec) throws FHIRPersistenceException { + String resourceType = cache.getResourceTypeNameCache().getName(rec.getResourceTypeId()); + if (resourceType == null) { + throw new FHIRPersistenceException("Resource type not found in cache for resourceTypeId=" + rec.getResourceTypeId()); + } + + // Note that if versionId is null, it means delete all known versions + // The resourcePayloadKey is always null here, because the intention + // for erase is to delete all instances of the record (in the rare case + // there may be orphaned records from failed transactions) + payloadPersistence.deletePayload(resourceType, rec.getResourceTypeId(), rec.getLogicalId(), rec.getVersionId(), null); + } + private boolean allSearchParmsAreGlobal(List queryParms) { for (QueryParameter queryParm : queryParms) { if (!SearchConstants.SYSTEM_LEVEL_GLOBAL_PARAMETER_NAMES.contains(queryParm.getCode())) { @@ -2675,19 +2875,34 @@ public boolean isUpdateCreateEnabled() { } @Override - public Future storePayload(Resource resource, String logicalId, int newVersionNumber) throws FHIRPersistenceException { - if (payloadPersistence != null) { + public PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey) throws FHIRPersistenceException { + if (isOffloadingSupported()) { + 
doCachePrefill(); // just in case we're called before any other database interaction (can happen) final String resourceTypeName = resource.getClass().getSimpleName(); - int resourceTypeId = cache.getResourceTypeCache().getId(resourceTypeName); + int resourceTypeId = getResourceTypeId(resourceTypeName); // Delegate the serialization and any compression to the FHIRPayloadPersistence implementation - return payloadPersistence.storePayload(resourceTypeName, resourceTypeId, logicalId, newVersionNumber, resource); + PayloadPersistenceResponse response = payloadPersistence.storePayload(resourceTypeName, resourceTypeId, logicalId, newVersionNumber, resourcePayloadKey, resource); + + // register the response object so that we can clean up in case of a rollback later + this.payloadPersistenceResponses.add(response); + return response; } else { // Offloading not supported by the plain JDBC persistence implementation, so return null return null; } } + /** + * Get the resource payload key value from the given context if offloading + * is supported and configured. Returns null otherwise. + * @param context + * @return + */ + private String getResourcePayloadKeyFromContext(FHIRPersistenceContext context) { + return context.getOffloadResponse() == null ? null : context.getOffloadResponse().getResourcePayloadKey(); + } + @Override public List readResourcesForRecords(List records) throws FHIRPersistenceException { // TODO support async read from payloadPersistence after issue #2900 is merged. 
diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceDAO.java index 7e52cd48678..ed78cebb77a 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceDAO.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceDAO.java @@ -54,7 +54,7 @@ public class PostgresResourceDAO extends ResourceDAOImpl { private static final String SQL_READ_RESOURCE_TYPE = "{CALL %s.add_resource_type(?, ?)}"; // 13 args (9 in, 4 out) - private static final String SQL_INSERT_WITH_PARAMETERS = "{CALL %s.add_any_resource(?,?,?,?,?,?,?,?,?,?,?,?,?)}"; + private static final String SQL_INSERT_WITH_PARAMETERS = "{CALL %s.add_any_resource(?,?,?,?,?,?,?,?,?,?,?,?,?,?)}"; // DAO used to obtain sequence values from FHIR_REF_SEQUENCE private FhirRefSequenceDAO fhirRefSequenceDAO; @@ -102,7 +102,13 @@ public Resource insert(Resource resource, List paramete stmt = connection.prepareCall(stmtString); stmt.setString(1, resource.getResourceType()); stmt.setString(2, resource.getLogicalId()); - stmt.setBinaryStream(3, resource.getDataStream().inputStream()); + + if (resource.getDataStream() != null) { + stmt.setBinaryStream(3, resource.getDataStream().inputStream()); + } else { + // payload was offloaded to another data store + stmt.setNull(3, Types.BINARY); + } lastUpdated = resource.getLastUpdated(); stmt.setTimestamp(4, lastUpdated, CalendarHelper.getCalendarForUTC()); @@ -111,21 +117,22 @@ public Resource insert(Resource resource, List paramete stmt.setInt(7, resource.getVersionId()); stmt.setString(8, parameterHashB64); setInt(stmt, 9, ifNoneMatch); - stmt.registerOutParameter(10, Types.BIGINT); - stmt.registerOutParameter(11, Types.VARCHAR); // The old parameter_hash - stmt.registerOutParameter(12, Types.INTEGER); // o_interaction_status - 
stmt.registerOutParameter(13, Types.INTEGER); // o_if_none_match_version + setString(stmt, 10, resource.getResourcePayloadKey()); + stmt.registerOutParameter(11, Types.BIGINT); + stmt.registerOutParameter(12, Types.VARCHAR); // The old parameter_hash + stmt.registerOutParameter(13, Types.INTEGER); // o_interaction_status + stmt.registerOutParameter(14, Types.INTEGER); // o_if_none_match_version dbCallStartTime = System.nanoTime(); stmt.execute(); dbCallDuration = (System.nanoTime()-dbCallStartTime)/1e6; - resource.setId(stmt.getLong(10)); + resource.setId(stmt.getLong(11)); - if (stmt.getInt(12) == 1) { + if (stmt.getInt(13) == 1) { // interaction status // no change, so skip parameter updates resource.setInteractionStatus(InteractionStatus.IF_NONE_MATCH_EXISTED); - resource.setIfNoneMatchVersion(stmt.getInt(13)); // current version + resource.setIfNoneMatchVersion(stmt.getInt(14)); // current version } else { resource.setInteractionStatus(InteractionStatus.MODIFIED); @@ -133,7 +140,7 @@ public Resource insert(Resource resource, List paramete // To keep things simple for the postgresql use-case, we just use a visitor to // handle inserts of parameters directly in the resource parameter tables. 
// Note we don't get any parameters for the resource soft-delete operation - final String currentParameterHash = stmt.getString(11); + final String currentParameterHash = stmt.getString(12); if (parameters != null && (parameterHashB64 == null || parameterHashB64.isEmpty() || !parameterHashB64.equals(currentParameterHash))) { // postgresql doesn't support partitioned multi-tenancy, so we disable it on the DAO: diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceNoProcDAO.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceNoProcDAO.java index 82b745f9653..9fbedacc1d9 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceNoProcDAO.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/postgres/PostgresResourceNoProcDAO.java @@ -125,6 +125,7 @@ public Resource insert(Resource resource, List paramete connection, parameterDao, ifNoneMatch, + resource.getResourcePayloadKey(), outInteractionStatus, outIfNoneMatchVersion ); @@ -205,7 +206,7 @@ public Resource insert(Resource resource, List paramete public long storeResource(String tablePrefix, List parameters, String p_logical_id, InputStream p_payload, Timestamp p_last_updated, boolean p_is_deleted, String p_source_key, Integer p_version, String parameterHashB64, Connection conn, - ParameterDAO parameterDao, Integer ifNoneMatch, + ParameterDAO parameterDao, Integer ifNoneMatch, String resourcePayloadKey, AtomicInteger outInteractionStatus, AtomicInteger outIfNoneMatchVersion) throws Exception { final String METHODNAME = "storeResource() for " + tablePrefix + " resource"; @@ -390,8 +391,8 @@ public long storeResource(String tablePrefix, List para } // Finally we get to the big resource data insert - String sql3 = "INSERT INTO " + tablePrefix + "_resources (resource_id, logical_resource_id, version_id, data, last_updated, is_deleted) " - + "VALUES 
(?,?,?,?,?,?)"; + String sql3 = "INSERT INTO " + tablePrefix + "_resources (resource_id, logical_resource_id, version_id, data, last_updated, is_deleted, resource_payload_key) " + + "VALUES (?,?,?,?,?,?,?)"; try (PreparedStatement stmt = conn.prepareStatement(sql3)) { // bind parameters stmt.setLong(1, v_resource_id); @@ -400,6 +401,7 @@ public long storeResource(String tablePrefix, List para stmt.setBinaryStream(4, p_payload); stmt.setTimestamp(5, p_last_updated, UTC); stmt.setString(6, p_is_deleted ? "Y" : "N"); + setString(stmt, 7, resourcePayloadKey); stmt.executeUpdate(); } diff --git a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/NewQueryBuilder.java b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/NewQueryBuilder.java index fc887c25bd6..1483ac1ef09 100644 --- a/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/NewQueryBuilder.java +++ b/fhir-persistence-jdbc/src/main/java/com/ibm/fhir/persistence/jdbc/util/NewQueryBuilder.java @@ -266,7 +266,8 @@ public Select buildQuery(Class resourceType, FHIRSearchContext searchContext) // Create a domain model for each resource type List subDomainModels = new ArrayList<>(); for (String domainResourceType : resourceTypes) { - SearchQuery subDomainModel = new SearchDataQuery(domainResourceType, false, false); + int domainResourceTypeId = identityCache.getResourceTypeId(domainResourceType); + SearchQuery subDomainModel = new SearchDataQuery(domainResourceType, false, false, domainResourceTypeId); buildModelCommon(subDomainModel, ModelSupport.getResourceType(domainResourceType), searchContext); subDomainModels.add(subDomainModel); } @@ -387,7 +388,7 @@ public Select buildWholeSystemDataQuery(FHIRSearchContext searchContext, Map logicalResourceIds = resourceTypeIdToLogicalResourceIdMap.get(resourceTypeId); - SearchQuery subDomainModel = new SearchWholeSystemDataQuery(resourceType); + SearchQuery subDomainModel = new SearchWholeSystemDataQuery(resourceType, 
resourceTypeId); subDomainModel.add(new WholeSystemDataExtension(resourceType, logicalResourceIds)); buildModelCommon(subDomainModel, ModelSupport.getResourceType(resourceType), searchContext); subDomainModels.add(subDomainModel); diff --git a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/connection/FHIRUserTransactionAdapterTest.java b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/connection/FHIRUserTransactionAdapterTest.java index cfcb1c12ce0..fc40f8557ae 100644 --- a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/connection/FHIRUserTransactionAdapterTest.java +++ b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/connection/FHIRUserTransactionAdapterTest.java @@ -27,7 +27,7 @@ public void testStandardFlow() throws Exception { MockUserTransaction tx = new MockUserTransaction(); MockTransactionSynchronizationRegistry sync = new MockTransactionSynchronizationRegistry(); assertEquals(tx.getStatus(), Status.STATUS_NO_TRANSACTION); - FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null, null); assertFalse(adapter.hasBegun()); adapter.begin(); assertTrue(adapter.hasBegun()); @@ -42,7 +42,7 @@ public void testRepeatFlow() throws Exception { MockUserTransaction tx = new MockUserTransaction(); MockTransactionSynchronizationRegistry sync = new MockTransactionSynchronizationRegistry(); assertEquals(tx.getStatus(), Status.STATUS_NO_TRANSACTION); - FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null, null); adapter.begin(); assertEquals(tx.getStatus(), Status.STATUS_ACTIVE); adapter.end(); @@ -62,7 +62,7 @@ public void testNestedFlow() throws Exception { MockUserTransaction tx = new MockUserTransaction(); 
MockTransactionSynchronizationRegistry sync = new MockTransactionSynchronizationRegistry(); assertEquals(tx.getStatus(), Status.STATUS_NO_TRANSACTION); - FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null, null); adapter.begin(); assertEquals(tx.getStatus(), Status.STATUS_ACTIVE); @@ -82,7 +82,7 @@ public void testNestedBeginAfterRollbackOnly() throws Exception { MockUserTransaction tx = new MockUserTransaction(); MockTransactionSynchronizationRegistry sync = new MockTransactionSynchronizationRegistry(); assertEquals(tx.getStatus(), Status.STATUS_NO_TRANSACTION); - FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null, null); adapter.begin(); assertEquals(tx.getStatus(), Status.STATUS_ACTIVE); @@ -107,7 +107,7 @@ public void testNestedRollbackOnly() throws Exception { MockUserTransaction tx = new MockUserTransaction(); MockTransactionSynchronizationRegistry sync = new MockTransactionSynchronizationRegistry(); assertEquals(tx.getStatus(), Status.STATUS_NO_TRANSACTION); - FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null, null); adapter.begin(); assertEquals(tx.getStatus(), Status.STATUS_ACTIVE); @@ -135,11 +135,11 @@ public void sharedNestedRollback() throws Exception { MockUserTransaction tx = new MockUserTransaction(); MockTransactionSynchronizationRegistry sync = new MockTransactionSynchronizationRegistry(); - FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null, null); adapter.begin(); assertEquals(tx.getStatus(), Status.STATUS_ACTIVE); - 
FHIRUserTransactionAdapter nested = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter nested = new FHIRUserTransactionAdapter(tx, sync, null, null, null); nested.begin(); assertEquals(tx.getStatus(), Status.STATUS_ACTIVE); @@ -167,7 +167,7 @@ public void sharedNestedCommit() throws Exception { MockUserTransaction tx = new MockUserTransaction(); MockTransactionSynchronizationRegistry sync = new MockTransactionSynchronizationRegistry(); - FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null, null); assertFalse(adapter.hasBegun()); adapter.begin(); assertTrue(adapter.hasBegun()); @@ -177,7 +177,7 @@ public void sharedNestedCommit() throws Exception { assertTrue(adapter.hasBegun()); assertEquals(tx.getStatus(), Status.STATUS_ACTIVE); - FHIRUserTransactionAdapter nested = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter nested = new FHIRUserTransactionAdapter(tx, sync, null, null, null); nested.begin(); assertTrue(adapter.hasBegun()); assertEquals(tx.getStatus(), Status.STATUS_ACTIVE); @@ -212,14 +212,14 @@ public void sharedNestedAfterRollback() throws Exception { MockUserTransaction tx = new MockUserTransaction(); MockTransactionSynchronizationRegistry sync = new MockTransactionSynchronizationRegistry(); - FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null, null); adapter.begin(); adapter.setRollbackOnly(); assertTrue(adapter.hasBegun()); assertEquals(tx.getStatus(), Status.STATUS_MARKED_ROLLBACK); // now try and start a nested transaction - FHIRUserTransactionAdapter nested = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter nested = new FHIRUserTransactionAdapter(tx, sync, null, null, null); nested.begin(); 
assertEquals(tx.getStatus(), Status.STATUS_MARKED_ROLLBACK); @@ -248,7 +248,7 @@ public void testBulkFlow() throws Exception { MockUserTransaction tx = new MockUserTransaction(); MockTransactionSynchronizationRegistry sync = new MockTransactionSynchronizationRegistry(); assertEquals(tx.getStatus(), Status.STATUS_NO_TRANSACTION); - FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null); + FHIRUserTransactionAdapter adapter = new FHIRUserTransactionAdapter(tx, sync, null, null, null); adapter.begin(); assertEquals(tx.getStatus(), Status.STATUS_ACTIVE); adapter.end(); diff --git a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/erase/EraseTestMain.java b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/erase/EraseTestMain.java index 6753ab5787b..141979182ab 100644 --- a/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/erase/EraseTestMain.java +++ b/fhir-persistence-jdbc/src/test/java/com/ibm/fhir/persistence/jdbc/test/erase/EraseTestMain.java @@ -33,6 +33,7 @@ import com.ibm.fhir.persistence.jdbc.dao.api.IIdNameCache; import com.ibm.fhir.persistence.jdbc.dao.api.INameIdCache; import com.ibm.fhir.schema.app.util.CommonUtil; +import com.ibm.fhir.schema.control.FhirSchemaConstants; /** * EraseTestMain is a test driver for the EraseResourceDAO so it can be debugged during development. 
@@ -87,7 +88,7 @@ protected void erase() throws Exception { System.out.println("Got a Connection"); try { FHIRDbFlavor flavor = new FHIRDbFlavorImpl(dbType, true); - EraseResourceDAO dao = new EraseResourceDAO(c, translator, schemaName, flavor, new MockLocalCache(), null); + EraseResourceDAO dao = new EraseResourceDAO(c, FhirSchemaConstants.FHIR_ADMIN, translator, schemaName, flavor, new MockLocalCache(), null); ResourceEraseRecord record = new ResourceEraseRecord(); EraseDTO eraseDto = new EraseDTO(); @@ -175,6 +176,11 @@ public boolean needToPrefill() { return false; } + @Override + public void clearNeedToPrefill() { + // TODO Auto-generated method stub + } + @Override public ICommonTokenValuesCache getResourceReferenceCache() { return null; diff --git a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirResourceTableGroup.java b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirResourceTableGroup.java index c137ef131c8..1fd848c4e1b 100644 --- a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirResourceTableGroup.java +++ b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirResourceTableGroup.java @@ -53,6 +53,7 @@ import static com.ibm.fhir.schema.control.FhirSchemaConstants.QUANTITY_VALUE_LOW; import static com.ibm.fhir.schema.control.FhirSchemaConstants.REF_VERSION_ID; import static com.ibm.fhir.schema.control.FhirSchemaConstants.RESOURCE_ID; +import static com.ibm.fhir.schema.control.FhirSchemaConstants.RESOURCE_PAYLOAD_KEY; import static com.ibm.fhir.schema.control.FhirSchemaConstants.RESOURCE_TOKEN_REFS; import static com.ibm.fhir.schema.control.FhirSchemaConstants.RESOURCE_TYPES; import static com.ibm.fhir.schema.control.FhirSchemaConstants.RESOURCE_TYPE_ID; @@ -61,6 +62,7 @@ import static com.ibm.fhir.schema.control.FhirSchemaConstants.STR_VALUE_LCASE; import static com.ibm.fhir.schema.control.FhirSchemaConstants.TAGS; import static 
com.ibm.fhir.schema.control.FhirSchemaConstants.TOKEN_VALUES_V; +import static com.ibm.fhir.schema.control.FhirSchemaConstants.UUID_LEN; import static com.ibm.fhir.schema.control.FhirSchemaConstants.VERSION; import static com.ibm.fhir.schema.control.FhirSchemaConstants.VERSION_BYTES; import static com.ibm.fhir.schema.control.FhirSchemaConstants.VERSION_ID; @@ -334,14 +336,16 @@ public void addResources(List group, String prefix) { final String tableName = prefix + _RESOURCES; Table tbl = Table.builder(schemaName, tableName) + .setVersion(FhirSchemaVersion.V0024.vid()) .setTenantColumnName(MT_ID) .addTag(FhirSchemaTags.RESOURCE_TYPE, prefix) - .addBigIntColumn( RESOURCE_ID, false) - .addBigIntColumn(LOGICAL_RESOURCE_ID, false) - .addIntColumn( VERSION_ID, false) - .addTimestampColumn( LAST_UPDATED, false) - .addCharColumn( IS_DELETED, 1, false) - .addBlobColumn( DATA, 2147483647, 10240, true) + .addBigIntColumn( RESOURCE_ID, false) + .addBigIntColumn( LOGICAL_RESOURCE_ID, false) + .addIntColumn( VERSION_ID, false) + .addTimestampColumn( LAST_UPDATED, false) + .addCharColumn( IS_DELETED, 1, false) + .addBlobColumn( DATA, 2147483647, 10240, true) + .addVarcharColumn(RESOURCE_PAYLOAD_KEY, UUID_LEN, true) // new column for V0024 .addUniqueIndex(tableName + "_PRF_IN1", prfIndexCols, prfIncludeCols) .addIndex(IDX + tableName + LOGICAL_RESOURCE_ID, LOGICAL_RESOURCE_ID) .addPrimaryKey(tableName + "_PK", RESOURCE_ID) @@ -350,8 +354,15 @@ public void addResources(List group, String prefix) { .enableAccessControl(this.sessionVariable) .addMigration(priorVersion -> { List statements = new ArrayList<>(); - // Intentionally a NOP - return statements; + if (priorVersion < FhirSchemaVersion.V0024.vid()) { + // Migration steps for the V0024 schema version + List cols = ColumnDefBuilder.builder() + .addVarcharColumn(RESOURCE_PAYLOAD_KEY, UUID_LEN, true) + .buildColumns(); + + statements.add(new AddColumn(schemaName, tableName, cols.get(0))); + } + return statements; }) .build(model); 
diff --git a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaConstants.java b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaConstants.java index b1db2c3e5a2..cb07d4f8c75 100644 --- a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaConstants.java +++ b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaConstants.java @@ -117,7 +117,9 @@ public class FhirSchemaConstants { public static final String CODE_SYSTEM_NAME = "CODE_SYSTEM_NAME"; public static final String TOKEN_VALUE = "TOKEN_VALUE"; public static final String COMPOSITE_ID = "COMPOSITE_ID"; - + public static final String RESOURCE_PAYLOAD_KEY = "RESOURCE_PAYLOAD_KEY"; + public static final int UUID_LEN = 36; + public static final String RESOURCE_TYPES = "RESOURCE_TYPES"; public static final String RESOURCE_TYPE = "RESOURCE_TYPE"; public static final String RESOURCE_TYPE_ID = "RESOURCE_TYPE_ID"; @@ -185,4 +187,9 @@ public class FhirSchemaConstants { public static final String PG_FILLFACTOR_PROP = "fillfactor"; public static final int PG_FILLFACTOR_VALUE = 90; // do not change without bumping schema versions for affected tables + + // Support for $erase operation + public static final String ERASED_RESOURCES = "ERASED_RESOURCES"; + public static final String ERASED_RESOURCE_ID = "ERASED_RESOURCE_ID"; + public static final String ERASED_RESOURCE_GROUP_ID = "ERASED_RESOURCE_GROUP_ID"; } \ No newline at end of file diff --git a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaGenerator.java b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaGenerator.java index 4be36675741..fb0ad74194a 100644 --- a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaGenerator.java +++ b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaGenerator.java @@ -22,6 +22,9 @@ import static 
com.ibm.fhir.schema.control.FhirSchemaConstants.DATE_START; import static com.ibm.fhir.schema.control.FhirSchemaConstants.DATE_VALUES; import static com.ibm.fhir.schema.control.FhirSchemaConstants.DATE_VALUE_DROPPED_COLUMN; +import static com.ibm.fhir.schema.control.FhirSchemaConstants.ERASED_RESOURCES; +import static com.ibm.fhir.schema.control.FhirSchemaConstants.ERASED_RESOURCE_GROUP_ID; +import static com.ibm.fhir.schema.control.FhirSchemaConstants.ERASED_RESOURCE_ID; import static com.ibm.fhir.schema.control.FhirSchemaConstants.FHIR_REF_SEQUENCE; import static com.ibm.fhir.schema.control.FhirSchemaConstants.FHIR_SEQUENCE; import static com.ibm.fhir.schema.control.FhirSchemaConstants.FK; @@ -392,6 +395,7 @@ public void buildSchema(PhysicalDataModel model) { addLogicalResourceProfiles(model); // V0014 addLogicalResourceTags(model); // V0014 addLogicalResourceSecurity(model); // V0016 + addErasedResources(model); // V0023 Table globalStrValues = addResourceStrValues(model); // for system-level _profile parameters Table globalDateValues = addResourceDateValues(model); // for system-level date parameters @@ -545,7 +549,7 @@ public void addLogicalResources(PhysicalDataModel pdm) { .addBigIntColumn(REINDEX_TXID, false, "0") // new column for V0006 .addTimestampColumn(LAST_UPDATED, true) // new column for V0014 .addCharColumn(IS_DELETED, 1, false, "'X'") - .addVarcharColumn(PARAMETER_HASH, PARAMETER_HASH_BYTES, true) // new column for V0015 + .addVarcharColumn(PARAMETER_HASH, PARAMETER_HASH_BYTES, true) // new column for V0015 .addPrimaryKey(tableName + "_PK", LOGICAL_RESOURCE_ID) .addUniqueIndex("UNQ_" + LOGICAL_RESOURCES, RESOURCE_TYPE_ID, LOGICAL_ID) .addIndex(IDX_LOGICAL_RESOURCES_RITS, new OrderedColumnDef(REINDEX_TSTAMP, OrderedColumnDef.Direction.DESC, null)) @@ -1279,6 +1283,55 @@ public Table addResourceTokenRefs(PhysicalDataModel pdm) { return tbl; } + /** + * The erased_resources table is used to track which logical resources and corresponding + * resource 
versions have been erased using the $erase operation. This table should + * typically be empty and only used temporarily by the erase DAO/procedures to indicate + * which rows have been erased. The entries in this table are then used to delete + * any offloaded payload entries. + * @param pdm + */ + public void addErasedResources(PhysicalDataModel pdm) { + final String tableName = ERASED_RESOURCES; + final String mtId = this.multitenant ? MT_ID : null; + + // Each erase operation is allocated an ERASED_RESOURCE_GROUP_ID + // value which can be used to retrieve the resource and/or + // resource-versions erased in a particular call. The rows + // can then be deleted once the erasure of any offloaded + // payload is confirmed. Note that we don't use logical_resource_id + // or resource_id values here, because those records may have + // already been deleted by $erase. + Table tbl = Table.builder(schemaName, tableName) + .setVersion(FhirSchemaVersion.V0023.vid()) + .setTenantColumnName(mtId) + .addBigIntColumn(ERASED_RESOURCE_ID, false) + .setIdentityColumn(ERASED_RESOURCE_ID, Generated.ALWAYS) + .addBigIntColumn(ERASED_RESOURCE_GROUP_ID, false) + .addIntColumn(RESOURCE_TYPE_ID, false) + .addVarcharColumn(LOGICAL_ID, LOGICAL_ID_BYTES, false) + .addIntColumn(VERSION_ID, true) + .addPrimaryKey(tableName + "_PK", ERASED_RESOURCE_ID) + .addIndex(IDX + tableName + "_GID", ERASED_RESOURCE_GROUP_ID) + .setTablespace(fhirTablespace) + .addPrivileges(resourceTablePrivileges) + .addForeignKeyConstraint(FK + tableName + "_RTID", schemaName, RESOURCE_TYPES, RESOURCE_TYPE_ID) + .enableAccessControl(this.sessionVariable) + .addWiths(addWiths()) // add table tuning + .addMigration(priorVersion -> { + List statements = new ArrayList<>(); + // Nothing yet + return statements; + }) + .build(pdm); + + // TODO should not need to add as a table and an object. Get the table to add itself? 
+ tbl.addTag(SCHEMA_GROUP_TAG, FHIRDATA_GROUP); + this.procedureDependencies.add(tbl); + pdm.addTable(tbl); + pdm.addObject(tbl); + } + /** *
     CREATE SEQUENCE fhir_sequence
diff --git a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaVersion.java b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaVersion.java
index e8233b8bf38..9800f8400f7 100644
--- a/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaVersion.java
+++ b/fhir-persistence-schema/src/main/java/com/ibm/fhir/schema/control/FhirSchemaVersion.java
@@ -42,6 +42,8 @@ public enum FhirSchemaVersion {
     ,V0020(20, "issue-1834 Set PostgreSQL fillfactor", true)
     ,V0021(21, "issue-713 remove Resource_LOGICAL_RESOURCES, DomainResource_LOGICAL_RESOURCES tables", false)
     ,V0022(22, "issue-2979 stored procedure update for 2050 ifNoneMatch", false)
+    ,V0023(23, "issue-2900 erased_resources to support $erase when offloading payloads", false)
+    ,V0024(24, "issue-2900 for offloading add resource_payload_key to xx_resources", false)
     ;
 
     // The version number recorded in the VERSION_HISTORY
diff --git a/fhir-persistence-schema/src/main/resources/db2/add_any_resource.sql b/fhir-persistence-schema/src/main/resources/db2/add_any_resource.sql
index e7713799f4a..4b392d1123d 100644
--- a/fhir-persistence-schema/src/main/resources/db2/add_any_resource.sql
+++ b/fhir-persistence-schema/src/main/resources/db2/add_any_resource.sql
@@ -38,11 +38,12 @@
       IN p_last_updated               TIMESTAMP,
       IN p_is_deleted                      CHAR(  1),
       IN p_version                          INT,
-      IN p_parameter_hash_b64           VARCHAR(44 OCTETS),
+      IN p_parameter_hash_b64           VARCHAR( 44 OCTETS),
       IN p_if_none_match                    INT,
+      IN p_resource_payload_key         VARCHAR( 36 OCTETS),
       OUT o_logical_resource_id          BIGINT,
       OUT o_resource_row_id              BIGINT,
-      OUT o_current_parameter_hash      VARCHAR(44 OCTETS),
+      OUT o_current_parameter_hash      VARCHAR( 44 OCTETS),
       OUT o_interaction_status              INT,
       OUT o_if_none_match_version           INT
     )
@@ -168,9 +169,9 @@ BEGIN
   END IF; -- end if existing resource
 
   PREPARE stmt FROM
-         'INSERT INTO ' || v_schema_name || '.' || p_resource_type || '_resources (mt_id, resource_id, logical_resource_id, version_id, data, last_updated, is_deleted) '
-      || ' VALUES ( ?, ?, ?, ?, ?, ?, ?)';
-  EXECUTE stmt USING {{ADMIN_SCHEMA_NAME}}.sv_tenant_id, v_resource_id, v_logical_resource_id, p_version, p_payload, p_last_updated, p_is_deleted;
+         'INSERT INTO ' || v_schema_name || '.' || p_resource_type || '_resources (mt_id, resource_id, logical_resource_id, version_id, data, last_updated, is_deleted, resource_payload_key) '
+      || ' VALUES ( ?, ?, ?, ?, ?, ?, ?, ?)';
+  EXECUTE stmt USING {{ADMIN_SCHEMA_NAME}}.sv_tenant_id, v_resource_id, v_logical_resource_id, p_version, p_payload, p_last_updated, p_is_deleted, p_resource_payload_key;
 
   IF v_new_resource = 0 THEN
     -- As this is an existing logical resource, we need to update the xx_logical_resource values to match
diff --git a/fhir-persistence-schema/src/main/resources/db2/erase_resource.sql b/fhir-persistence-schema/src/main/resources/db2/erase_resource.sql
index e926ad9e748..cda746a3026 100644
--- a/fhir-persistence-schema/src/main/resources/db2/erase_resource.sql
+++ b/fhir-persistence-schema/src/main/resources/db2/erase_resource.sql
@@ -13,6 +13,7 @@
 -- ----------------------------------------------------------------------------
     ( IN p_resource_type                VARCHAR(  36 OCTETS),
       IN p_logical_id                   VARCHAR( 255 OCTETS),
+      IN p_erased_resource_group_id     BIGINT,
       OUT o_deleted                     BIGINT)
     LANGUAGE SQL
     MODIFIES SQL DATA
@@ -25,7 +26,7 @@ BEGIN
   DECLARE v_not_found           BIGINT DEFAULT 0;
   DECLARE v_msg                 VARCHAR(128 OCTETS) DEFAULT 'DEFAULT ERROR';
 
-  DECLARE r_stmt, dr_stmt, d_stmt, dlr_stmt, dglr_stmt, drcl_stmt STATEMENT;
+  DECLARE r_stmt, dr_stmt, d_stmt, dlr_stmt, dglr_stmt, drcl_stmt, iv_stmt STATEMENT;
 
   -- Set a condition when the resource is not found.
   DECLARE CONTINUE HANDLER FOR NOT FOUND SET v_not_found = -1;
@@ -54,6 +55,13 @@ BEGIN
     || '    WHERE LOGICAL_RESOURCE_ID = ?)';
     EXECUTE rcl_stmt USING v_logical_resource_id;
 
+    -- Step 1.1: Record the versions we need to delete if we are doing payload offload
+    PREPARE iv_stmt FROM 'INSERT INTO {{SCHEMA_NAME}}.erased_resources(mt_id, erased_resource_group_id, resource_type_id, logical_id, version_id) ' 
+        || '      SELECT ?, ?, ?, ?, version_id '
+        || '        FROM {{SCHEMA_NAME}}.' || p_resource_type || '_RESOURCES '
+        || '       WHERE LOGICAL_RESOURCE_ID = ? ';
+    EXECUTE iv_stmt USING {{ADMIN_SCHEMA_NAME}}.sv_tenant_id, p_erased_resource_group_id, v_resource_type_id, p_logical_id, v_logical_resource_id;
+
     -- Step 2: Delete All Versions from Resources Table 
     -- Create the prepared statement to delete Resource Versions in chunks
     -- Implementation note: fetch must be the last part of the sub-select
diff --git a/fhir-persistence-schema/src/main/resources/postgres/add_any_resource.sql b/fhir-persistence-schema/src/main/resources/postgres/add_any_resource.sql
index 34d155789c4..652efa6393a 100644
--- a/fhir-persistence-schema/src/main/resources/postgres/add_any_resource.sql
+++ b/fhir-persistence-schema/src/main/resources/postgres/add_any_resource.sql
@@ -38,6 +38,7 @@
       IN p_version                           INT,
       IN p_parameter_hash_b64            VARCHAR( 44),
       IN p_if_none_match                     INT,
+      IN p_resource_payload_key          VARCHAR( 36),
       OUT o_logical_resource_id           BIGINT,
       OUT o_current_parameter_hash       VARCHAR( 44),
       OUT o_interaction_status               INT,
@@ -153,9 +154,9 @@ BEGIN
   END IF; -- end if existing resource
 
   EXECUTE
-         'INSERT INTO ' || v_schema_name || '.' || p_resource_type || '_resources (resource_id, logical_resource_id, version_id, data, last_updated, is_deleted) '
-      || ' VALUES ($1, $2, $3, $4, $5, $6)'
-    USING v_resource_id, v_logical_resource_id, p_version, p_payload, p_last_updated, p_is_deleted;
+         'INSERT INTO ' || v_schema_name || '.' || p_resource_type || '_resources (resource_id, logical_resource_id, version_id, data, last_updated, is_deleted, resource_payload_key) '
+      || ' VALUES ($1, $2, $3, $4, $5, $6, $7)'
+    USING v_resource_id, v_logical_resource_id, p_version, p_payload, p_last_updated, p_is_deleted, p_resource_payload_key;
 
   
   IF v_new_resource = 0 THEN
diff --git a/fhir-persistence-schema/src/main/resources/postgres/erase_resource.sql b/fhir-persistence-schema/src/main/resources/postgres/erase_resource.sql
index c74960a18df..b19939e98d7 100644
--- a/fhir-persistence-schema/src/main/resources/postgres/erase_resource.sql
+++ b/fhir-persistence-schema/src/main/resources/postgres/erase_resource.sql
@@ -13,6 +13,7 @@
 -- ----------------------------------------------------------------------------
     ( IN p_resource_type                VARCHAR(  36),
       IN p_logical_id                   VARCHAR( 255),
+      IN p_erased_resource_group_id     BIGINT,
       OUT o_deleted                     BIGINT)
     RETURNS BIGINT
     LANGUAGE plpgsql
@@ -52,6 +53,13 @@ BEGIN
     || '    FROM {{SCHEMA_NAME}}.' || p_resource_type || '_RESOURCES'
     || '    WHERE LOGICAL_RESOURCE_ID = $1)'
     USING v_logical_resource_id;
+    
+    -- Step 1.1: Record the versions we need to delete if we are doing payload offload
+    EXECUTE 'INSERT INTO {{SCHEMA_NAME}}.erased_resources(erased_resource_group_id, resource_type_id, logical_id, version_id) ' 
+        || '      SELECT $1, $2, $3, version_id '
+        || '        FROM {{SCHEMA_NAME}}.' || p_resource_type || '_RESOURCES '
+        || '       WHERE LOGICAL_RESOURCE_ID = $4 '
+    USING p_erased_resource_group_id, v_resource_type_id, p_logical_id, v_logical_resource_id;
 
     -- Step 2: Delete All Versions from Resources Table 
     EXECUTE 'DELETE FROM {{SCHEMA_NAME}}.' || p_resource_type || '_RESOURCES WHERE LOGICAL_RESOURCE_ID = $1'
diff --git a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/DerbyFhirDatabaseTest.java b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/DerbyFhirDatabaseTest.java
index 6032398d173..72520ea6d75 100644
--- a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/DerbyFhirDatabaseTest.java
+++ b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/DerbyFhirDatabaseTest.java
@@ -138,7 +138,7 @@ protected void checkDatabase(IConnectionProvider cp, String schemaName) throws S
 
                 // Check that we have the correct number of tables. This will need to be updated
                 // whenever tables, views or sequences are added or removed
-                assertEquals(adapter.listSchemaObjects(schemaName).size(), 1917);
+                assertEquals(adapter.listSchemaObjects(schemaName).size(), 1918);
                 c.commit();
             } catch (Throwable t) {
                 c.rollback();
diff --git a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/DerbySchemaVersionsTest.java b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/DerbySchemaVersionsTest.java
index 08563486861..e7fc877e8e7 100644
--- a/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/DerbySchemaVersionsTest.java
+++ b/fhir-persistence-schema/src/test/java/com/ibm/fhir/schema/derby/DerbySchemaVersionsTest.java
@@ -52,7 +52,7 @@ public void test() throws Exception {
 
             // Make sure we can correctly determine the latest schema version value
             svm.updateSchemaVersion();
-            assertEquals(svm.getVersionForSchema(), FhirSchemaVersion.V0022.vid());
+            assertEquals(svm.getVersionForSchema(), FhirSchemaVersion.V0024.vid());
 
             assertTrue(svm.isLatestSchema());
        }
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/FHIRPersistence.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/FHIRPersistence.java
index 14770d9c156..a1b4e519a00 100644
--- a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/FHIRPersistence.java
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/FHIRPersistence.java
@@ -16,7 +16,7 @@
 import com.ibm.fhir.persistence.erase.EraseDTO;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceNotSupportedException;
-import com.ibm.fhir.persistence.payload.PayloadKey;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 
 /**
  * This interface defines the contract between the FHIR Server's REST API layer and the underlying
@@ -107,7 +107,13 @@  SingleResourceResult vread(FHIRPersistenceContext contex
 
     /**
      * Deletes the specified FHIR Resource from the datastore.
-     *
+     * 
+     * This implementation of delete is open to a race condition if an update and delete
+     * are issued at the same time. This API has been deprecated and replaced with 
+     *     {@link #deleteWithMeta(FHIRPersistenceContext, Resource)}
+     * following the new pattern where the resource is never modified by the persistence
+     * layer.
+     * 
      * @param context the FHIRPersistenceContext instance associated with the current request
      * @param resourceType The type of FHIR Resource to be deleted.
      * @param logicalId the logical id of the FHIR Resource to be deleted
@@ -115,10 +121,23 @@  SingleResourceResult vread(FHIRPersistenceContext contex
      *         an OperationOutcome with hints, warnings, or errors related to the interaction
      * @throws FHIRPersistenceException
      */
+    @Deprecated
     default  SingleResourceResult delete(FHIRPersistenceContext context, Class resourceType, String logicalId) throws FHIRPersistenceException {
         throw new FHIRPersistenceNotSupportedException("The 'delete' operation is not supported by this persistence implementation");
     }
 
+    /**
+     * Deletes the FHIR resource from the datastore. The resource must be configured with the correct
+     * meta information because the persistence layer no longer makes any modifications to resources.
+     * @param <T> the resource type
+     * @param context
+     * @param resource
+     * @throws FHIRPersistenceException
+     */
+    default  void deleteWithMeta(FHIRPersistenceContext context, T resource) throws FHIRPersistenceException {
+        throw new FHIRPersistenceNotSupportedException("The 'delete' operation is not supported by this persistence implementation");
+    }
+
     /**
      * Retrieves all of the versions of the specified FHIR Resource.
      *
@@ -199,6 +218,15 @@ default boolean isReindexSupported() {
         return false;
     }
 
+    /**
+     * Returns true iff the persistence layer implementation supports offloading and this has been
+     * configured for the tenant/datasource
+     * @return
+     */
+    default boolean isOffloadingSupported() {
+        return false;
+    }
+
     /**
      * Initiates reindexing for either a specified list of index IDs,
      * or a randomly chosen resource. The number of resources processed is returned.
@@ -293,12 +321,14 @@ default ResourceEraseRecord erase(EraseDTO eraseDto) throws FHIRPersistenceExcep
      * {@link Future} can be used to obtain the status of the operation. If the result
      * is null, then the implementation does not support offloading and the payload must
      * be stored in the traditional manner (e.g. in the RDBMS). A {@link Future} is used
-     * because the offloading storage operation may be asynchronous.
+     * because the offloading storage operation may be asynchronous. This Future must be
+     * resolved prior to the transaction commit.
      * @param resource
      * @param logicalId
      * @param newVersionNumber
+     * @param resourcePayloadKey
      * @return
      * @throws FHIRPersistenceException
      */
-    Future storePayload(Resource resource, String logicalId, int newVersionNumber) throws FHIRPersistenceException;
+    PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey) throws FHIRPersistenceException;
 }
\ No newline at end of file
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadPersistenceHelper.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/FHIRPersistenceSupport.java
similarity index 60%
rename from fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadPersistenceHelper.java
rename to fhir-persistence/src/main/java/com/ibm/fhir/persistence/FHIRPersistenceSupport.java
index fff62226c46..06119a819c7 100644
--- a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadPersistenceHelper.java
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/FHIRPersistenceSupport.java
@@ -4,7 +4,7 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
-package com.ibm.fhir.persistence.payload;
+package com.ibm.fhir.persistence;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -12,6 +12,7 @@
 import java.util.List;
 import java.util.logging.Level;
 import java.util.logging.Logger;
+import java.util.zip.GZIPInputStream;
 import java.util.zip.GZIPOutputStream;
 
 import com.ibm.fhir.model.format.Format;
@@ -29,9 +30,9 @@
 /**
  * Collection of helper methods related to the persistence of FHIR resource payload data
  */
-public class PayloadPersistenceHelper {
+public class FHIRPersistenceSupport {
     // the logger to use for this class
-    private static final Logger logger = Logger.getLogger(PayloadPersistenceHelper.class.getName());
+    private static final Logger logger = Logger.getLogger(FHIRPersistenceSupport.class.getName());
     
     // initial buffer size for rendered payload
     private static final int DATA_BUFFER_INITIAL_SIZE = 10*1024; // 10KiB
@@ -42,24 +43,23 @@ public class PayloadPersistenceHelper {
      * @param compress
      * @return
      */
-    public static InputOutputByteStream render(Resource resource, boolean compress) throws FHIRPersistenceException {
+    public static InputOutputByteStream render(Resource resource, boolean compress) throws FHIRGeneratorException, IOException {
         InputOutputByteStream ioStream = new InputOutputByteStream(DATA_BUFFER_INITIAL_SIZE);
         
         if (compress) {
             try (GZIPOutputStream zipStream = new GZIPOutputStream(ioStream.outputStream())) {
                 FHIRGenerator.generator(Format.JSON, false).generate(resource, zipStream);
-                zipStream.close();
             } catch (IOException | FHIRGeneratorException x) {
-                logger.log(Level.SEVERE, "Resource: '" + resource.getClass().getSimpleName() + "/" + resource.getId() + "'", x);
-                throw new FHIRPersistenceException("Store payload failed");
+                logger.log(Level.SEVERE, "Failed generating resource: '" + resource.getClass().getSimpleName() + "/" + resource.getId() + "'", x);
+                throw x;
             }
         } else {
             // not compressed, so render directly to the ioStream
             try {
                 FHIRGenerator.generator(Format.JSON, false).generate(resource, ioStream.outputStream());            
             } catch (FHIRGeneratorException x) {
-                logger.log(Level.SEVERE, "Resource: '" + resource.getClass().getSimpleName() + "/" + resource.getId() + "'", x);
-                throw new FHIRPersistenceException("Store payload failed");
+                logger.log(Level.SEVERE, "Failed generating resource: '" + resource.getClass().getSimpleName() + "/" + resource.getId() + "'", x);
+                throw x;
             }
         }
         return ioStream;
@@ -71,12 +71,17 @@ public static InputOutputByteStream render(Resource resource, boolean compress)
      * @param resourceType
      * @param in
      * @param elements
+     * @param uncompress
      * @return
      */
-    public static  T parse(Class resourceType, InputStream in, List elements) {
+    public static  T parse(Class resourceType, InputStream in, List elements, boolean uncompress) throws FHIRParserException, IOException {
         T result;
-
         try {
+            if (uncompress) {
+                // Wrap the InputStream so we uncompress the content when reading...and
+                // ensure we close the stream as required in the finally block
+                in = new GZIPInputStream(in);
+            }
             if (elements != null) {
                 // parse/filter the resource using elements
                 result = FHIRParser.parser(Format.JSON).as(FHIRJsonParser.class).parseAndFilter(in, elements);
@@ -87,9 +92,11 @@ public static  T parse(Class resourceType, InputStream in
             } else {
                 result = FHIRParser.parser(Format.JSON).parse(in);
             }
-        } catch (FHIRParserException x) {
-            // need to wrap because this method is being called as a lambda
-            throw new RuntimeException(x);
+        } finally {
+            if (uncompress) {
+                // make sure we always close the GZIPInputStream to avoid leaking resources it holds onto
+                in.close();
+            }
         }
 
         return result;
@@ -102,4 +109,25 @@ public static  T parse(Class resourceType, InputStream in
     public static com.ibm.fhir.model.type.Instant getCurrentInstant() {
         return com.ibm.fhir.model.type.Instant.now(ZoneOffset.UTC);
     }
+
+    /**
+     * Obtain the versionId value from the Resource meta element, converting
+     * to an int for use by the persistence layer
+     * @param resource
+     * @return
+     * @throws FHIRPersistenceException
+     */
+    public static int getMetaVersionId(Resource resource) throws FHIRPersistenceException {
+        // Programming error if this is being called before the meta element has been set
+        // properly on the resource
+        if (resource.getMeta() == null || resource.getMeta().getVersionId() == null) {
+            throw new FHIRPersistenceException("Resource missing meta versionId");
+        }
+        
+        String versionIdValue = resource.getMeta().getVersionId().getValue();
+        if (versionIdValue == null) {
+            throw new FHIRPersistenceException("Resource missing meta versionId value");
+        }
+        return Integer.parseInt(versionIdValue);
+    }
 }
\ No newline at end of file
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/FHIRPersistenceContext.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/FHIRPersistenceContext.java
index 045d9356ce5..d618e3cf24a 100644
--- a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/FHIRPersistenceContext.java
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/FHIRPersistenceContext.java
@@ -6,6 +6,7 @@
 
 package com.ibm.fhir.persistence.context;
 
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.search.context.FHIRSearchContext;
 
 /**
@@ -47,4 +48,10 @@ public interface FHIRPersistenceContext {
      * @return the value from the If-None-Match header in the PUT request
      */
     Integer getIfNoneMatch();
+    
+    /**
+     * Get the payload persistence response 
+     * @return
+     */
+    PayloadPersistenceResponse getOffloadResponse();
 }
\ No newline at end of file
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/FHIRPersistenceContextFactory.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/FHIRPersistenceContextFactory.java
index 57730585003..836acc15289 100644
--- a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/FHIRPersistenceContextFactory.java
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/FHIRPersistenceContextFactory.java
@@ -30,7 +30,8 @@ private FHIRPersistenceContextFactory() {
      * @param event the FHIRPersistenceEvent instance to be contained in the FHIRPersistenceContext instance
      */
     public static FHIRPersistenceContext createPersistenceContext(FHIRPersistenceEvent event) {
-        return new FHIRPersistenceContextImpl(event);
+        return FHIRPersistenceContextImpl.builder(event)
+                .build();
     }
 
     /**
@@ -39,16 +40,20 @@ public static FHIRPersistenceContext createPersistenceContext(FHIRPersistenceEve
      * @param includeDeleted flag to tell the persistence layer to include deleted resources in the operation results.
      */
     public static FHIRPersistenceContext createPersistenceContext(FHIRPersistenceEvent event, boolean includeDeleted) {
-        return new FHIRPersistenceContextImpl(event, includeDeleted);
+        return FHIRPersistenceContextImpl.builder(event)
+                .withIncludeDeleted(includeDeleted)
+                .build();
     }
     
     /**
      * Returns a FHIRPersistenceContext that contains a FHIRPersistenceEvent instance.
      * @param event the FHIRPersistenceEvent instance to be contained in the FHIRPersistenceContext instance
-     * @param ifNoneExist flag to tell the persistence layer to apply conditional create-on-update logic.
+     * @param ifNoneMatch the If-None-Match header value, used by the persistence layer for conditional update handling.
      */
-    public static FHIRPersistenceContext createPersistenceContext(FHIRPersistenceEvent event, Integer ifNoneExist) {
-        return new FHIRPersistenceContextImpl(event, ifNoneExist);
+    public static FHIRPersistenceContext createPersistenceContext(FHIRPersistenceEvent event, Integer ifNoneMatch) {
+        return FHIRPersistenceContextImpl.builder(event)
+                .withIfNoneMatch(ifNoneMatch)
+                .build();
     }
 
     /**
@@ -57,7 +62,9 @@ public static FHIRPersistenceContext createPersistenceContext(FHIRPersistenceEve
      * @param historyContext the FHIRHistoryContext instance to be contained in the FHIRPersistenceContext instance
      */
     public static FHIRPersistenceContext createPersistenceContext(FHIRPersistenceEvent event, FHIRHistoryContext historyContext) {
-        return new FHIRPersistenceContextImpl(event, historyContext);
+        return FHIRPersistenceContextImpl.builder(event)
+                .withHistoryContext(historyContext)
+                .build();
     }
 
     /**
@@ -66,7 +73,9 @@ public static FHIRPersistenceContext createPersistenceContext(FHIRPersistenceEve
      * @param searchContext the FHIRSearchContext instance to be contained in the FHIRPersistenceContext instance
      */
     public static FHIRPersistenceContext createPersistenceContext(FHIRPersistenceEvent event, FHIRSearchContext searchContext) {
-        return new FHIRPersistenceContextImpl(event, searchContext);
+        return FHIRPersistenceContextImpl.builder(event)
+                .withSearchContext(searchContext)
+                .build();
     }
 
     /**
@@ -96,6 +105,9 @@ public static FHIRHistoryContext createHistoryContext() {
      * @param searchContext the FHIRSearchContext instance to be contained in the FHIRPersistenceContext instance
      */
     public static FHIRPersistenceContext createPersistenceContext(FHIRPersistenceEvent event, boolean includeDeleted, FHIRSearchContext searchContext) {
-        return new FHIRPersistenceContextImpl(event, includeDeleted, searchContext);
+        return FHIRPersistenceContextImpl.builder(event)
+                .withIncludeDeleted(includeDeleted)
+                .withSearchContext(searchContext)
+                .build();
     }
 }
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/impl/FHIRPersistenceContextImpl.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/impl/FHIRPersistenceContextImpl.java
index f7bf9d90b72..db7b8f761c4 100644
--- a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/impl/FHIRPersistenceContextImpl.java
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/context/impl/FHIRPersistenceContextImpl.java
@@ -9,6 +9,7 @@
 import com.ibm.fhir.persistence.context.FHIRHistoryContext;
 import com.ibm.fhir.persistence.context.FHIRPersistenceContext;
 import com.ibm.fhir.persistence.context.FHIRPersistenceEvent;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.search.context.FHIRSearchContext;
 
 /**
@@ -22,12 +23,119 @@ public class FHIRPersistenceContextImpl implements FHIRPersistenceContext {
     private FHIRSearchContext searchContext;
     private boolean includeDeleted = false;
     private Integer ifNoneMatch;
+    
+    // The response from the payload persistence (offloading) call, if any
+    private PayloadPersistenceResponse offloadResponse;
+
+    /**
+     * Factory function to create a FHIRPersistenceContext builder
+     * @param event
+     * @return
+     */
+    public static Builder builder(FHIRPersistenceEvent event) {
+        return new Builder(event);
+    }
+
+    /**
+     * Builder to create new instances of FHIRPersistenceContextImpl
+     */
+    public static class Builder {
+        private FHIRPersistenceEvent persistenceEvent;
+        private FHIRHistoryContext historyContext;
+        private FHIRSearchContext searchContext;
+        private boolean includeDeleted;
+        private Integer ifNoneMatch;
+        private PayloadPersistenceResponse offloadResponse;
+        
+        /**
+         * Protected constructor
+         * @param event
+         */
+        protected Builder(FHIRPersistenceEvent event) {
+            this.persistenceEvent = event;
+        }
+        
+        /**
+         * Build the FHIRPersistenceContext implementation
+         * @return
+         */
+        public FHIRPersistenceContext build() {
+            FHIRPersistenceContextImpl impl;
+            
+            if (historyContext != null) {
+                impl = new FHIRPersistenceContextImpl(persistenceEvent, historyContext);
+            } else if (searchContext != null) {
+                impl = new FHIRPersistenceContextImpl(persistenceEvent, searchContext);
+            } else {
+                impl = new FHIRPersistenceContextImpl(persistenceEvent);
+            }
+            impl.setIfNoneMatch(ifNoneMatch);
+            impl.setIncludeDeleted(includeDeleted);
+            impl.setOffloadResponse(offloadResponse);
+            
+            return impl;
+        }
+        
+        /**
+         * Build with the given searchContext
+         * @param searchContext
+         * @return
+         */
+        public Builder withSearchContext(FHIRSearchContext searchContext) {
+            this.searchContext = searchContext;
+            return this;
+        }
+
+        /**
+         * Build with the given historyContext
+         * @param historyContext
+         * @return
+         */
+        public Builder withHistoryContext(FHIRHistoryContext historyContext) {
+            this.historyContext = historyContext;
+            return this;
+        }
 
-    public FHIRPersistenceContextImpl(FHIRPersistenceEvent pe) {
+        /**
+         * Build with the ifNoneMatch value
+         * @param ifNoneMatch
+         * @return
+         */
+        public Builder withIfNoneMatch(Integer ifNoneMatch) {
+            this.ifNoneMatch = ifNoneMatch;
+            return this;
+        }
+
+        /**
+         * Build with the includeDeleted value
+         * @param includeDeleted
+         * @return
+         */
+        public Builder withIncludeDeleted(boolean includeDeleted) {
+            this.includeDeleted = includeDeleted;
+            return this;
+        }
+
+        /**
+         * Build with the given offloadResponse
+         * @param offloadResponse
+         * @return
+         */
+        public Builder withOffloadResponse(PayloadPersistenceResponse offloadResponse) {
+            this.offloadResponse = offloadResponse;
+            return this;
+        }
+    }
+
+    /**
+     * Private constructor
+     * @param pe
+     */
+    private FHIRPersistenceContextImpl(FHIRPersistenceEvent pe) {
         this.persistenceEvent = pe;
     }
 
-    public FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, boolean includeDeleted) {
+    private FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, boolean includeDeleted) {
         this.persistenceEvent = pe;
         setIncludeDeleted(includeDeleted);
     }
@@ -37,21 +145,21 @@ public FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, boolean includeDelete
      * @param pe
      * @param ifNoneMatch
      */
-    public FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, Integer ifNoneMatch) {
+    private FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, Integer ifNoneMatch) {
         this.persistenceEvent = pe;
         setIfNoneMatch(ifNoneMatch);
     }
     
-    public FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, FHIRHistoryContext hc) {
+    private FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, FHIRHistoryContext hc) {
         this.persistenceEvent = pe;
         this.historyContext = hc;
     }
-    public FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, FHIRSearchContext sc) {
+    private FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, FHIRSearchContext sc) {
         this.persistenceEvent = pe;
         this.searchContext = sc;
     }
 
-    public FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, boolean includeDeleted, FHIRSearchContext sc) {
+    private FHIRPersistenceContextImpl(FHIRPersistenceEvent pe, boolean includeDeleted, FHIRSearchContext sc) {
         this.persistenceEvent = pe;
         setIncludeDeleted(includeDeleted);
         this.searchContext = sc;
@@ -97,4 +205,16 @@ public void setIfNoneMatch(Integer ifNoneMatch) {
     public Integer getIfNoneMatch() {
         return this.ifNoneMatch;
     }
+
+    @Override
+    public PayloadPersistenceResponse getOffloadResponse() {
+        return this.offloadResponse;
+    }
+
+    /**
+     * @param offloadResponse the offloadResponse to set
+     */
+    public void setOffloadResponse(PayloadPersistenceResponse offloadResponse) {
+        this.offloadResponse = offloadResponse;
+    }
 }
\ No newline at end of file
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/FHIRPayloadPartitionStrategy.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/FHIRPayloadPartitionStrategy.java
index d69ff18ca78..8402da8c06d 100644
--- a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/FHIRPayloadPartitionStrategy.java
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/FHIRPayloadPartitionStrategy.java
@@ -16,5 +16,5 @@ public interface FHIRPayloadPartitionStrategy {
      * Ask for the partition name from this strategy
      * @return
      */
-    String getPartitionName();
+    String getPartitionName(String resourceType, String logicalId);
 }
\ No newline at end of file
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/FHIRPayloadPersistence.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/FHIRPayloadPersistence.java
index dc690ec8f97..b0b6d69763a 100644
--- a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/FHIRPayloadPersistence.java
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/FHIRPayloadPersistence.java
@@ -7,7 +7,6 @@
 package com.ibm.fhir.persistence.payload;
 
 import java.util.List;
-import java.util.concurrent.Future;
 
 import com.ibm.fhir.model.resource.Resource;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
@@ -25,40 +24,35 @@ public interface FHIRPayloadPersistence {
      * @param resourceTypeId the database id assigned to this resource type
      * @param logicalId the logical id of the resource
      * @param version the version of the resource
+     * @param resourcePayloadKey the unique key used to tie this to the RDBMS record
      * @param resource the resource to store
-     * @return a {@link Future} holding the payload key and status.
+     * @return the payload key details and future result status.
      */
-    Future storePayload(String resourceTypeName, int resourceTypeId, String logicalId, int version, Resource resource) throws FHIRPersistenceException;
+    PayloadPersistenceResponse storePayload(String resourceTypeName, int resourceTypeId, String logicalId, int version, String resourcePayloadKey, Resource resource) throws FHIRPersistenceException;
 
     /**
      * Retrieve the payload data for the given resourceTypeId, logicalId and version. Synchronous.
      * @param resourceType the expected resource type class
-     * @param resourceTypeId the unique int idenfifier for the resource type
+     * @param rowResourceTypeName the resource type name of the resource read from the database (matching the resourceTypeId)
+     * @param resourceTypeId the unique int identifier for the resource type name
      * @param logicalId the logical identifier of the desired resource
      * @param version the specific version of the desired resource
+     * @param resourcePayloadKey the resource payload key connecting the entry to the RDBMS record
      * @param elements to filter elements within the resource - can be null
-     * @return the fhirResourcePayload exactly as it was provided to {@link #storePayload(String, int, String, int, byte[])}
+     * @return the fhirResourcePayload exactly as it was provided to {@link #storePayload(String, int, String, int, String, byte[])}
      */
-     T readResource(Class resourceType, int resourceTypeId, String logicalId, int version, List elements) throws FHIRPersistenceException;
+     T readResource(Class resourceType, String rowResourceTypeName, int resourceTypeId, String logicalId, int version, String resourcePayloadKey, List elements) throws FHIRPersistenceException;
 
     /**
-     * Fetch the resource directly using the payload key. This is faster than {@link #readResource(Class, int, String, int, List)}
-     * because the payload persistence implementation can use the {@link PayloadKey} to directly address the location where the
-     * payload is stored. Allows async implementations.
-     * @param 
+     * Delete the payload item. This may be called to clean up after a failed transaction or
+     * by the reconciliation process when it finds an orphaned record.
+     * It may also be called when performing a hard delete on a resource.
      * @param resourceType
-     * @param payloadKey
-     * @return a Future that will hold the resource after it has been read
-     * @throws FHIRPersistenceException
-     */
-     Future readResource(Class resourceType, PayloadKey payloadKey) throws FHIRPersistenceException;
-
-    /**
-     * Delete the payload item. This may be called to clean up after a failed transaction
      * @param resourceTypeId
      * @param logicalId
-     * @param version
+     * @param version the version id, or null for all versions
+     * @param resourcePayloadKey the key to make sure the entry matches the RDBMS record
      * @throws FHIRPersistenceException
      */
-    void deletePayload(int resourceTypeId, String logicalId, int version) throws FHIRPersistenceException;
+    void deletePayload(String resourceType, int resourceTypeId, String logicalId, Integer version, String resourcePayloadKey) throws FHIRPersistenceException;
 }
\ No newline at end of file
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadKey.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadPersistenceResponse.java
similarity index 55%
rename from fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadKey.java
rename to fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadPersistenceResponse.java
index 2fa6ec59c53..e969b0dbcbd 100644
--- a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadKey.java
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadPersistenceResponse.java
@@ -1,16 +1,22 @@
 /*
- * (C) Copyright IBM Corp. 2021
+ * (C) Copyright IBM Corp. 2021, 2022
  *
  * SPDX-License-Identifier: Apache-2.0
  */
  
 package com.ibm.fhir.persistence.payload;
 
+import java.util.concurrent.Future;
+
 /**
- * A key used to identify a payload object stored by the payload persistence layer
+ * Data carrier encapsulating the response from the payload persistence component
+ * when making a call to offload the resource payload.
  */
-public class PayloadKey {
+public class PayloadPersistenceResponse {
 
+    // The UUID value used to tie together the RDBMS and offload records
+    private final String resourcePayloadKey;
+    
     // The string name of the resource type
     private final String resourceTypeName;
     
@@ -23,56 +29,40 @@ public class PayloadKey {
     // The version id of the resource
     private final int versionId;
     
-    // Identifies the partition used to store the payload in a partitioned system (like Cassandra)
-    private final String partitionKey;
-    
-    // The identifier assigned by the payload persistence layer
-    private final String payloadId;
-
-    // The status of the payload persistence operation
-    private final Status status;
+    // The (future) result status of the async persistence call
+    private final Future result;
 
-    /**
-     * Enumeration of status types
-     */
-    public static enum Status {
-        OK, FAILED
-    }
-    
     /**
      * Public constructor
+     * @param resourcePayloadKey
      * @param resourceTypeName
      * @param resourceTypeId
      * @param logicalId
      * @param versionId
-     * @param partitionKey
-     * @param payloadId
-     * @param status
+     * @param result
      */
-    public PayloadKey(String resourceTypeName, int resourceTypeId, String logicalId, int versionId, String partitionKey, String payloadId,
-        Status status) {
+    public PayloadPersistenceResponse(String resourcePayloadKey, String resourceTypeName, int resourceTypeId, String logicalId, int versionId,
+            Future result) {
+        this.resourcePayloadKey = resourcePayloadKey;
         this.resourceTypeName = resourceTypeName;
         this.resourceTypeId = resourceTypeId;
         this.logicalId = logicalId;
         this.versionId = versionId;
-        this.partitionKey = partitionKey;
-        this.payloadId = payloadId;
-        this.status = status;
+        this.result = result;
     }
     
     @Override
     public String toString() {
         StringBuilder result = new StringBuilder();
-        result.append(partitionKey);
-        result.append("-");
-        result.append(payloadId);
-        result.append("[");
         result.append(resourceTypeName);
-        result.append("/");
+        result.append("[");
+        result.append(resourceTypeId);
+        result.append("]/");
         result.append(logicalId);
         result.append("/");
         result.append(versionId);
-        result.append("]");
+        result.append("/");
+        result.append(this.resourcePayloadKey);
         return result.toString();
     }
     
@@ -105,23 +95,16 @@ public int getVersionId() {
     }
     
     /**
-     * @return the partitionKey
-     */
-    public String getPartitionKey() {
-        return partitionKey;
-    }
-    
-    /**
-     * @return the payloadId
+     * @return the resourcePayloadKey
      */
-    public String getPayloadId() {
-        return payloadId;
+    public String getResourcePayloadKey() {
+        return resourcePayloadKey;
     }
 
     /**
-     * @return the status
+     * @return the result
      */
-    public Status getStatus() {
-        return status;
+    public Future getResult() {
+        return result;
     }
 }
\ No newline at end of file
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadPersistenceResult.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadPersistenceResult.java
new file mode 100644
index 00000000000..013ff4a3a2e
--- /dev/null
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadPersistenceResult.java
@@ -0,0 +1,33 @@
+/*
+ * (C) Copyright IBM Corp. 2021
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+ 
+package com.ibm.fhir.persistence.payload;
+
+/**
+ * The response from the payload persistence operation
+ */
+public class PayloadPersistenceResult {
+    // The status of the payload persistence operation
+    private final Status status;
+    
+    /**
+     * Enumeration of status types
+     */
+    public static enum Status {
+        OK, FAILED
+    }
+    
+    public PayloadPersistenceResult(Status status) {
+        this.status = status;
+    }
+
+    /**
+     * @return the status
+     */
+    public Status getStatus() {
+        return status;
+    }
+}
\ No newline at end of file
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadReader.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadReader.java
new file mode 100644
index 00000000000..fa748d23661
--- /dev/null
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadReader.java
@@ -0,0 +1,28 @@
+/*
+ * (C) Copyright IBM Corp. 2021
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+ 
+package com.ibm.fhir.persistence.payload;
+
+import java.io.InputStream;
+
+import com.ibm.fhir.model.resource.Resource;
+import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
+
+/**
+ * Strategy for reading a resource from a stream
+ */
+public interface PayloadReader {
+
+    /**
+     * Read the resource of type T from the {@link InputStream}.
+     * @param <T> the resource type
+     * @param resourceType the class of the resource to read
+     * @param is the input stream to read the resource from
+     * @return the parsed resource
+     * @throws FHIRPersistenceException
+     */
+    <T extends Resource> T read(Class<T> resourceType, InputStream is) throws FHIRPersistenceException;
+}
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadReaderImpl.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadReaderImpl.java
new file mode 100644
index 00000000000..77551a8288a
--- /dev/null
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/payload/PayloadReaderImpl.java
@@ -0,0 +1,47 @@
+/*
+ * (C) Copyright IBM Corp. 2021
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+ 
+package com.ibm.fhir.persistence.payload;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+
+import com.ibm.fhir.model.parser.exception.FHIRParserException;
+import com.ibm.fhir.model.resource.Resource;
+import com.ibm.fhir.persistence.FHIRPersistenceSupport;
+import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
+
+
+/**
+ * Strategy for reading a payload object with optional compression
+ */
+public class PayloadReaderImpl implements PayloadReader {
+    // Is the input stream compressed?
+    private final boolean uncompress;
+    
+    // Subset elements when parsing the Resource
+    private final List<String> elements;
+
+    /**
+     * Public constructor
+     * @param uncompress
+     * @param elements
+     */
+    public PayloadReaderImpl(boolean uncompress, List<String> elements) {
+        this.uncompress = uncompress;
+        this.elements = elements;
+    }
+
+    @Override
+    public <T extends Resource> T read(Class<T> resourceType, InputStream inputStream) throws FHIRPersistenceException {
+        try {
+            return FHIRPersistenceSupport.parse(resourceType, inputStream, elements, uncompress);
+        } catch (IOException | FHIRParserException x) {
+            throw new FHIRPersistenceException("Error reading resource", x);
+        }
+    }
+}
diff --git a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/util/InputOutputByteStream.java b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/util/InputOutputByteStream.java
index 4ac8ee44479..a510570c40d 100644
--- a/fhir-persistence/src/main/java/com/ibm/fhir/persistence/util/InputOutputByteStream.java
+++ b/fhir-persistence/src/main/java/com/ibm/fhir/persistence/util/InputOutputByteStream.java
@@ -73,7 +73,7 @@ private class ByteOutputStream extends OutputStream {
         @Override
         public void write(int b) throws IOException {
             int idx = offset++;
-            extend(idx);
+            extend(offset); // use the new length
             buffer[idx] = (byte)b;
         }
 
@@ -167,8 +167,12 @@ public InputOutputByteStream(int initialCapacity) {
         this.reshapeStrat = new ReshapeStrategy();
     }
 
+    /**
+     * Adopt a buffer which may already contain data
+     * @param adoptBuffer
+     * @param offset
+     */
     public InputOutputByteStream(byte[] adoptBuffer, int offset) {
-        // Adopt a buffer which may already contain data
         this.buffer = adoptBuffer;
         this.offset = offset;
 
@@ -178,6 +182,23 @@ public InputOutputByteStream(byte[] adoptBuffer, int offset) {
         }
         this.reshapeStrat = new ReshapeStrategy();
     }
+    
+    /**
+     * Initialize the internal buffer by copying the contents of the given ByteBuffer
+     * (which can be read-only).
+     * @param bb
+     */
+    public InputOutputByteStream(ByteBuffer bb) {
+        int size = bb.remaining();
+        if (size < 1) {
+            throw new IllegalArgumentException("Buffer is empty");
+        }
+
+        this.buffer = new byte[size];
+        bb.get(buffer);
+        this.offset = size;
+        this.reshapeStrat = new ReshapeStrategy();
+    }
 
     /**
      * Create a buffer with a given capacity and override the {@link ReshapeStrategy}
@@ -248,4 +269,12 @@ public ByteBuffer wrap() {
     public void reset() {
         this.offset = 0;
     }
+    
+    /**
+     * Get the underlying byte[] buffer at the current point in time.
+     * @return
+     */
+    public byte[] getRawBuffer() {
+        return this.buffer;
+    }
 }
\ No newline at end of file
diff --git a/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/MockPersistenceImpl.java b/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/MockPersistenceImpl.java
index 698c4920270..b7ae6fbca63 100644
--- a/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/MockPersistenceImpl.java
+++ b/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/MockPersistenceImpl.java
@@ -9,7 +9,6 @@
 import java.time.Instant;
 import java.util.Collections;
 import java.util.List;
-import java.util.concurrent.Future;
 import java.util.function.Function;
 
 import com.ibm.fhir.model.resource.OperationOutcome;
@@ -24,7 +23,7 @@
 import com.ibm.fhir.persistence.context.FHIRPersistenceContext;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceDeletedException;
-import com.ibm.fhir.persistence.payload.PayloadKey;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 
 /**
  * Mock implementation of FHIRPersistence for use during testing.
@@ -128,7 +127,7 @@ public  SingleResourceResult updateWithMeta(FHIRPersisten
     }
 
     @Override
-    public Future storePayload(Resource resource, String logicalId, int newVersionNumber) {
+    public PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey) {
         return null;
     }
 
diff --git a/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/common/AbstractPersistenceTest.java b/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/common/AbstractPersistenceTest.java
index e6736663463..ad64ff540ea 100644
--- a/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/common/AbstractPersistenceTest.java
+++ b/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/common/AbstractPersistenceTest.java
@@ -29,10 +29,7 @@
 import com.ibm.fhir.config.FHIRConfiguration;
 import com.ibm.fhir.config.FHIRRequestContext;
 import com.ibm.fhir.model.resource.Resource;
-import com.ibm.fhir.model.resource.Resource.Builder;
-import com.ibm.fhir.model.type.Id;
 import com.ibm.fhir.model.type.Instant;
-import com.ibm.fhir.model.type.Meta;
 import com.ibm.fhir.persistence.FHIRPersistence;
 import com.ibm.fhir.persistence.MultiResourceResult;
 import com.ibm.fhir.persistence.context.FHIRHistoryContext;
diff --git a/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/common/AbstractReverseChainTest.java b/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/common/AbstractReverseChainTest.java
index be15e31eae0..207642adce2 100644
--- a/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/common/AbstractReverseChainTest.java
+++ b/fhir-persistence/src/test/java/com/ibm/fhir/persistence/test/common/AbstractReverseChainTest.java
@@ -86,6 +86,7 @@ public void createResources() throws Exception {
         Coding uniqueTag = Coding.builder().system(uri("http://ibm.com/fhir/tag")).code(code(now.toString())).build();
         Coding uniqueSecurity = Coding.builder().system(uri("http://ibm.com/fhir/security")).code(code(now.toString())).build();
 
+        startTrx();
         // Organizations that will be referenced by a Patient
         savedOrg1 = org.toBuilder().active(com.ibm.fhir.model.type.Boolean.of(true)).build();
         savedOrg1 = persistence.create(getDefaultPersistenceContext(), savedOrg1).getResource();
@@ -180,6 +181,7 @@ public void createResources() throws Exception {
                 .device(reference("Device/" + savedDevice2.getId() + "/_history/2"))
                 .build();
         savedObservation6 = persistence.create(getDefaultPersistenceContext(), savedObservation6).getResource();
+        commitTrx();
     }
 
     @AfterClass
diff --git a/fhir-server-spi/src/main/java/com/ibm/fhir/server/spi/operation/FHIRResourceHelpers.java b/fhir-server-spi/src/main/java/com/ibm/fhir/server/spi/operation/FHIRResourceHelpers.java
index d0930140a9e..b21673350fd 100644
--- a/fhir-server-spi/src/main/java/com/ibm/fhir/server/spi/operation/FHIRResourceHelpers.java
+++ b/fhir-server-spi/src/main/java/com/ibm/fhir/server/spi/operation/FHIRResourceHelpers.java
@@ -9,7 +9,6 @@
 import java.time.Instant;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.Future;
 
 import javax.ws.rs.core.MultivaluedMap;
 
@@ -25,7 +24,7 @@
 import com.ibm.fhir.persistence.context.FHIRPersistenceEvent;
 import com.ibm.fhir.persistence.erase.EraseDTO;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
-import com.ibm.fhir.persistence.payload.PayloadKey;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.search.context.FHIRSearchContext;
 
 /**
@@ -134,10 +133,11 @@ FHIRRestOperationResponse doCreateMeta(FHIRPersistenceEvent event, List w
      * @param event
      * @param warnings
      * @param resource
+     * @param offloadResponse
      * @return
      * @throws Exception
      */
-    FHIRRestOperationResponse doCreatePersist(FHIRPersistenceEvent event, List warnings, Resource resource) throws Exception;
+    FHIRRestOperationResponse doCreatePersist(FHIRPersistenceEvent event, List<Issue> warnings, Resource resource, PayloadPersistenceResponse offloadResponse) throws Exception;
 
     /**
      * 1st phase of update interaction.
@@ -168,11 +168,12 @@ FHIRRestOperationResponse doUpdateMeta(FHIRPersistenceEvent event, String type,
      * @param warnings
      * @param isDeleted
      * @param ifNoneMatch
+     * @param offloadResponse
      * @return
      * @throws Exception
      */
     public FHIRRestOperationResponse doPatchOrUpdatePersist(FHIRPersistenceEvent event, String type, String id, boolean isPatch,
-        Resource newResource, Resource prevResource, List warnings, boolean isDeleted, Integer ifNoneMatch) throws Exception;
+        Resource newResource, Resource prevResource, List<Issue> warnings, boolean isDeleted, Integer ifNoneMatch, PayloadPersistenceResponse offloadResponse) throws Exception;
 
     /**
      * Builds a collection of properties that will be passed to the persistence interceptors.
@@ -540,9 +541,10 @@ default ResourceEraseRecord doErase(FHIROperationContext operationContext, Erase
      * @param resource the resource to store (with correct Meta fields)
      * @param logicalId the logical id of the resource
      * @param newVersionNumber the version number to use
-     * @return a Future response to the payload store operation, or null if it is not supported
+     * @param resourcePayloadKey the key used to tie the RDBMS record with the offload record
+     * @return a response to the payload store operation, or null if it is not supported
      */
-    Future storePayload(Resource resource, String logicalId, int newVersionNumber) throws Exception;
+    PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey) throws Exception;
 
     /**
      * Validate a resource. First validate profile assertions for the resource if configured to do so,
diff --git a/fhir-server-spi/src/main/java/com/ibm/fhir/server/spi/operation/FHIRRestOperationResponse.java b/fhir-server-spi/src/main/java/com/ibm/fhir/server/spi/operation/FHIRRestOperationResponse.java
index 225627d5323..e6342c0d175 100644
--- a/fhir-server-spi/src/main/java/com/ibm/fhir/server/spi/operation/FHIRRestOperationResponse.java
+++ b/fhir-server-spi/src/main/java/com/ibm/fhir/server/spi/operation/FHIRRestOperationResponse.java
@@ -7,13 +7,12 @@
 package com.ibm.fhir.server.spi.operation;
 
 import java.net.URI;
-import java.util.concurrent.Future;
 
 import javax.ws.rs.core.Response;
 
 import com.ibm.fhir.model.resource.OperationOutcome;
 import com.ibm.fhir.model.resource.Resource;
-import com.ibm.fhir.persistence.payload.PayloadKey;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 
 /**
  * This class is used to represent a response returned by the FHIR resource helper methods.
@@ -30,7 +29,7 @@ public class FHIRRestOperationResponse {
     private boolean completed;
 
     // A nested response we may get when offloading payload storage (e.g. in COS, Cassandra)
-    private Future storePayloadResponse;
+    private PayloadPersistenceResponse storePayloadResponse;
 
     // The id of the resource, which could be new in the case of create
     private String resourceId;
@@ -49,7 +48,7 @@ public FHIRRestOperationResponse(Response.Status status, URI locationURI, Operat
         setOperationOutcome(operationOutcome);
     }
 
-    public FHIRRestOperationResponse(Resource resource, String resourceId, Future storePayloadResponse) {
+    public FHIRRestOperationResponse(Resource resource, String resourceId, PayloadPersistenceResponse storePayloadResponse) {
         this.resource = resource;
         this.resourceId = resourceId;
         this.setStorePayloadResponse(storePayloadResponse);
@@ -129,14 +128,14 @@ public void setCompleted(boolean completed) {
     /**
      * @return the storePayloadResponse
      */
-    public Future getStorePayloadResponse() {
+    public PayloadPersistenceResponse getStorePayloadResponse() {
         return storePayloadResponse;
     }
 
     /**
      * @param storePayloadResponse the storePayloadResponse to set
      */
-    public void setStorePayloadResponse(Future storePayloadResponse) {
+    public void setStorePayloadResponse(PayloadPersistenceResponse storePayloadResponse) {
         this.storePayloadResponse = storePayloadResponse;
     }
-}
+}
\ No newline at end of file
diff --git a/fhir-server-test/src/test/java/com/ibm/fhir/server/test/examples/R4ExampleServerTest.java b/fhir-server-test/src/test/java/com/ibm/fhir/server/test/examples/R4ExampleServerTest.java
index cb3e0a53e6b..da91fdaf984 100644
--- a/fhir-server-test/src/test/java/com/ibm/fhir/server/test/examples/R4ExampleServerTest.java
+++ b/fhir-server-test/src/test/java/com/ibm/fhir/server/test/examples/R4ExampleServerTest.java
@@ -6,11 +6,13 @@
 
 package com.ibm.fhir.server.test.examples;
 
+import java.util.Properties;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
 import org.testng.annotations.Test;
 
+import com.ibm.fhir.client.FHIRClient;
 import com.ibm.fhir.examples.Index;
 import com.ibm.fhir.model.spec.test.DriverMetrics;
 import com.ibm.fhir.model.spec.test.R4ExamplesDriver;
@@ -21,6 +23,9 @@
  * Basic sniff test of the FHIR Server.
  */
 public class R4ExampleServerTest extends FHIRServerTestBase {
+    
+    // the tenant id to use for the FHIR server requests
+    private String tenantId;
 
     /**
      * Process all the examples in the fhir-r4-spec example library
@@ -34,14 +39,21 @@ public void processExamples() throws Exception {
         // Setup a Pool
         ExecutorService es = Executors.newFixedThreadPool(5);
         driver.setPool(es, 5);
+        
 
         DriverMetrics dm = new DriverMetrics();
         driver.setMetrics(dm);
         driver.setValidator(new ValidationProcessor());
-        driver.setProcessor(new ExampleRequestProcessor(this, "default", dm, 1));
+        driver.setProcessor(new ExampleRequestProcessor(this, tenantId, dm, 1));
 
         String index = System.getProperty(this.getClass().getName()
             + ".index", Index.MINIMAL_JSON.name());
         driver.processIndex(Index.valueOf(index));
     }
+
+    @Override
+    public void setUp(Properties properties) throws Exception {
+        super.setUp(properties);
+        this.tenantId = properties.getProperty(FHIRClient.PROPNAME_TENANT_ID, "default");
+    }
 }
diff --git a/fhir-server-test/src/test/java/com/ibm/fhir/server/test/operation/EraseOperationTest.java b/fhir-server-test/src/test/java/com/ibm/fhir/server/test/operation/EraseOperationTest.java
index 67f9428cb83..40a49f29d20 100644
--- a/fhir-server-test/src/test/java/com/ibm/fhir/server/test/operation/EraseOperationTest.java
+++ b/fhir-server-test/src/test/java/com/ibm/fhir/server/test/operation/EraseOperationTest.java
@@ -225,16 +225,17 @@ private void eraseResource(String resourceType, String logicalId, boolean error,
     private void eraseResourceByVersion(String resourceType, String logicalId, Integer version, boolean error, String msg, boolean patient, boolean reason, String reasonMsg) {
         Entity entity = Entity.entity(generateParameters(patient, reason, reasonMsg, Optional.of(version)), FHIRMediaType.APPLICATION_FHIR_JSON);
 
+        final String requestPath = "/" + resourceType + "/" + logicalId + "/$erase";
         Response r = getWebTarget()
-            .path("/" + resourceType + "/" + logicalId + "/$erase")
+            .path(requestPath)
             .request(FHIRMediaType.APPLICATION_FHIR_JSON)
             .header("X-FHIR-TENANT-ID", "default")
             .header("X-FHIR-DSID", "default")
             .post(entity, Response.class);
         if (error) {
-            assertEquals(r.getStatus(), Response.Status.BAD_REQUEST.getStatusCode());
+            assertEquals(r.getStatus(), Response.Status.BAD_REQUEST.getStatusCode(), requestPath);
         } else {
-            assertEquals(r.getStatus(), Response.Status.OK.getStatusCode());
+            assertEquals(r.getStatus(), Response.Status.OK.getStatusCode(), requestPath);
         }
     }
 
@@ -409,12 +410,13 @@ private void checkResourceDeletedNotErased(String resourceType, String logicalId
      */
     private void checkResourceHistoryDoesNotExist(String resourceType, String logicalId, Integer version) {
         WebTarget target = getWebTarget();
-        target = target.path("/" + resourceType + "/" + logicalId + "/_history/" + version);
+        final String resourcePath = "/" + resourceType + "/" + logicalId + "/_history/" + version;
+        target = target.path(resourcePath);
         Response r = target.request(FHIRMediaType.APPLICATION_FHIR_JSON)
                 .header("X-FHIR-TENANT-ID", "default")
                 .header("X-FHIR-DSID", "default")
                 .get(Response.class);
-        assertEquals(r.getStatus(), Status.NOT_FOUND.getStatusCode());
+        assertEquals(r.getStatus(), Status.NOT_FOUND.getStatusCode(), resourcePath);
     }
 
     /**
diff --git a/fhir-server-test/src/test/resources/test.properties b/fhir-server-test/src/test/resources/test.properties
index 057ec37a767..629b05a65ee 100644
--- a/fhir-server-test/src/test/resources/test.properties
+++ b/fhir-server-test/src/test/resources/test.properties
@@ -36,7 +36,9 @@ test.kafka.topicName = fhirNotifications
 fhirclient.logging.enabled = false
 
 fhirclient.hostnameVerification.enabled = false
-fhirclient.http.receive.timeout = 60000
+
+# Longer than the default Liberty transaction timeout 120s
+fhirclient.http.receive.timeout = 130000
 fhirclient.http.return.pref = minimal
 
 # Used to turn on or off tests or throttle
diff --git a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionCreate.java b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionCreate.java
index ffc5ce101c1..a5b67be4282 100644
--- a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionCreate.java
+++ b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionCreate.java
@@ -45,12 +45,18 @@ public FHIRRestInteractionCreate(int entryIndex, FHIRPersistenceEvent event, Ent
     @Override
     public void process(FHIRRestInteractionVisitor visitor) throws Exception {
         FHIRRestOperationResponse result = visitor.doCreate(getEntryIndex(), getEvent(), getWarnings(),
-                getValidationResponseEntry(), getRequestDescription(), getRequestURL(),
-                getAccumulatedTime(), type, getNewResource(), ifNoneExist, localIdentifier);
+                getValidationResponseEntry(), getRequestDescription(), getRequestURL(), getAccumulatedTime(), type,
+                getNewResource(), ifNoneExist, localIdentifier, getOffloadResponse());
 
         // update the resource so we can use it when called in the next processing phase
-        if (result != null && result.getResource() != null) {
-            setNewResource(result.getResource());
+        if (result != null) {
+            if (result.getResource() != null) {
+                setNewResource(result.getResource());
+            }
+            
+            if (result.getStorePayloadResponse() != null) {
+                setOffloadResponse(result.getStorePayloadResponse());
+            }
         }
     }
 }
\ No newline at end of file
diff --git a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionPatch.java b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionPatch.java
index 317c7bab7f3..f024f309b10 100644
--- a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionPatch.java
+++ b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionPatch.java
@@ -57,7 +57,7 @@ public FHIRRestInteractionPatch(int entryIndex, FHIRPersistenceEvent event, Stri
     public void process(FHIRRestInteractionVisitor visitor) throws Exception {
         FHIRRestOperationResponse result = visitor.doPatch(getEntryIndex(), getEvent(), getValidationResponseEntry(),
                 getRequestDescription(), getRequestURL(), getAccumulatedTime(), type, id, getNewResource(),
-                getPrevResource(), patch, ifMatchValue, searchQueryString, skippableUpdate, getWarnings(), localIdentifier);
+                getPrevResource(), patch, ifMatchValue, searchQueryString, skippableUpdate, getWarnings(), localIdentifier, getOffloadResponse());
 
         // If the response includes a resource, update our copy so that we can pass to the
         // next visitor.
@@ -69,6 +69,10 @@ public void process(FHIRRestInteractionVisitor visitor) throws Exception {
             if (result.getPrevResource() != null) {
                 setPrevResource(result.getPrevResource());
             }
+
+            if (result.getStorePayloadResponse() != null) {
+                setOffloadResponse(result.getStorePayloadResponse());
+            }
         }
     }
 }
\ No newline at end of file
diff --git a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionResource.java b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionResource.java
index f290798bc21..e406d33934a 100644
--- a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionResource.java
+++ b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionResource.java
@@ -9,6 +9,7 @@
 import com.ibm.fhir.model.resource.Bundle.Entry;
 import com.ibm.fhir.model.resource.Resource;
 import com.ibm.fhir.persistence.context.FHIRPersistenceEvent;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.server.util.FHIRUrlParser;
 
 /**
@@ -26,6 +27,9 @@ public abstract class FHIRRestInteractionResource extends FHIRRestInteractionBas
 
     // The previous resource (e.g. if read from the database
     private Resource prevResource;
+    
+    // The response from payload persistence when offloading
+    private PayloadPersistenceResponse offloadResponse;
 
     /**
      * Protected constructor
@@ -90,4 +94,18 @@ public Resource getPrevResource() {
     public FHIRPersistenceEvent getEvent() {
         return event;
     }
+
+    /**
+     * @return the offloadResponse
+     */
+    public PayloadPersistenceResponse getOffloadResponse() {
+        return offloadResponse;
+    }
+
+    /**
+     * @param offloadResponse the offloadResponse to set
+     */
+    public void setOffloadResponse(PayloadPersistenceResponse offloadResponse) {
+        this.offloadResponse = offloadResponse;
+    }
 }
\ No newline at end of file
diff --git a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionUpdate.java b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionUpdate.java
index 52fe8531a52..98e3cd5d337 100644
--- a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionUpdate.java
+++ b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionUpdate.java
@@ -64,7 +64,8 @@ public void process(FHIRRestInteractionVisitor visitor) throws Exception {
 
         FHIRRestOperationResponse result = visitor.doUpdate(getEntryIndex(), getEvent(), getValidationResponseEntry(),
                 getRequestDescription(), getRequestURL(), getAccumulatedTime(), type, id, getNewResource(),
-                getPrevResource(), ifMatchValue, searchQueryString, skippableUpdate, localIdentifier, getWarnings(), deleted, ifNoneMatch);
+                getPrevResource(), ifMatchValue, searchQueryString, skippableUpdate, localIdentifier, getWarnings(), deleted, ifNoneMatch,
+                getOffloadResponse());
 
         // update the resource so we can use it when called in the next processing phase
         if (result != null) {
@@ -75,7 +76,11 @@ public void process(FHIRRestInteractionVisitor visitor) throws Exception {
             if (result.getPrevResource() != null) {
                 setPrevResource(result.getPrevResource());
             }
-            
+
+            if (result.getStorePayloadResponse() != null) {
+                setOffloadResponse(result.getStorePayloadResponse());
+            }
+
             // Record the deletion status so we can return the correct response when undeleting
             this.deleted = result.isDeleted();
         }
diff --git a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitor.java b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitor.java
index e01a885d789..67c8af05613 100644
--- a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitor.java
+++ b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitor.java
@@ -16,6 +16,7 @@
 import com.ibm.fhir.model.resource.OperationOutcome.Issue;
 import com.ibm.fhir.model.resource.Resource;
 import com.ibm.fhir.persistence.context.FHIRPersistenceEvent;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.server.spi.operation.FHIROperationContext;
 import com.ibm.fhir.server.spi.operation.FHIRRestOperationResponse;
 import com.ibm.fhir.server.util.FHIRUrlParser;
@@ -141,12 +142,14 @@ FHIRRestOperationResponse doHistory(int entryIndex, String requestDescription, F
      * @param ifNoneExist
      *            whether to create the resource if none exists
      * @param localIdentifier
+     * @param offloadResponse
+     *            the response from payload persistence when offloading
      * @return a FHIRRestOperationResponse object containing the results of the operation
      * @throws Exception
      */
     FHIRRestOperationResponse doCreate(int entryIndex, FHIRPersistenceEvent event, List warnings,
             Entry validationResponseEntry, String requestDescription, FHIRUrlParser requestURL, long accumulatedTime,
-            String type, Resource resource, String ifNoneExist, String localIdentifier) throws Exception;
+            String type, Resource resource, String ifNoneExist, String localIdentifier, PayloadPersistenceResponse offloadResponse) throws Exception;
 
     /**
      * Performs an update operation (a new version of the Resource will be stored).
@@ -180,13 +183,16 @@ FHIRRestOperationResponse doCreate(int entryIndex, FHIRPersistenceEvent event, L
      *            flag to indicate if the resource is currently deleted
      * @param ifNoneMatch
      *            conditional create-on-update
+     * @param offloadResponse
+     *            the response from payload persistence when offloading
      * @return a FHIRRestOperationResponse that contains the results of the operation
      * @throws Exception
      */
     FHIRRestOperationResponse doUpdate(int entryIndex, FHIRPersistenceEvent event, Entry validationResponseEntry,
             String requestDescription, FHIRUrlParser requestURL, long accumulatedTime, String type, String id,
             Resource newResource, Resource prevResource, String ifMatchValue, String searchQueryString,
-            boolean skippableUpdate, String localIdentifier, List warnings, boolean isDeleted, Integer ifNoneMatch) throws Exception;
+            boolean skippableUpdate, String localIdentifier, List<Issue> warnings, boolean isDeleted, Integer ifNoneMatch,
+            PayloadPersistenceResponse offloadResponse) throws Exception;
 
     /**
      * Performs a patch operation (a new version of the Resource will be stored).
@@ -215,6 +221,8 @@ FHIRRestOperationResponse doUpdate(int entryIndex, FHIRPersistenceEvent event, E
      * @param skippableUpdate
      *            if true, and the result of the patch matches the existing resource on the server, then skip the update;
      *            if false, then always attempt the update
+     * @param offloadResponse
+     *            response from payload persistence when offloading
      * @param warnings
      * @param localIdentifier
      * @return a FHIRRestOperationResponse that contains the results of the operation
@@ -223,7 +231,7 @@ FHIRRestOperationResponse doUpdate(int entryIndex, FHIRPersistenceEvent event, E
     FHIRRestOperationResponse doPatch(int entryIndex, FHIRPersistenceEvent event, Entry validationResponseEntry,
             String requestDescription, FHIRUrlParser requestURL, long accumulatedTime, String type, String id,
             Resource newResource, Resource prevResource, FHIRPatch patch, String ifMatchValue, String searchQueryString,
-            boolean skippableUpdate, List warnings, String localIdentifier) throws Exception;
+            boolean skippableUpdate, List<Issue> warnings, String localIdentifier, PayloadPersistenceResponse offloadResponse) throws Exception;
 
     /**
      * Helper method which invokes a custom operation.
diff --git a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorMeta.java b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorMeta.java
index b88fc2f40bc..89221fa6383 100644
--- a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorMeta.java
+++ b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorMeta.java
@@ -41,6 +41,7 @@
 import com.ibm.fhir.persistence.context.FHIRPersistenceEvent;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceDeletedException;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceNotFoundException;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.search.SearchConstants;
 import com.ibm.fhir.search.exception.FHIRSearchException;
 import com.ibm.fhir.server.exception.FHIRRestBundledRequestException;
@@ -110,7 +111,7 @@ public FHIRRestOperationResponse doHistory(int entryIndex, String requestDescrip
     @Override
     public FHIRRestOperationResponse doCreate(int entryIndex, FHIRPersistenceEvent event, List warnings,
             Entry validationResponseEntry, String requestDescription, FHIRUrlParser requestURL, long accumulatedTime,
-            String type, Resource resource, String ifNoneExist, String localIdentifier) throws Exception {
+            String type, Resource resource, String ifNoneExist, String localIdentifier, PayloadPersistenceResponse offloadResponse) throws Exception {
         logStart(entryIndex, requestDescription, requestURL);
 
         // Skip CREATE if validation failed
@@ -161,7 +162,7 @@ public FHIRRestOperationResponse doCreate(int entryIndex, FHIRPersistenceEvent e
     public FHIRRestOperationResponse doUpdate(int entryIndex, FHIRPersistenceEvent event, Entry validationResponseEntry,
             String requestDescription, FHIRUrlParser requestURL, long accumulatedTime, String type, String id,
             Resource resource, Resource prevResource, String ifMatchValue, String searchQueryString, boolean skippableUpdate,
-            String localIdentifier, List warnings, boolean isDeleted, Integer ifNoneMatch) throws Exception {
+            String localIdentifier, List warnings, boolean isDeleted, Integer ifNoneMatch, PayloadPersistenceResponse offloadResponse) throws Exception {
         logStart(entryIndex, requestDescription, requestURL);
 
         // Skip UPDATE if validation failed
@@ -202,7 +203,7 @@ public FHIRRestOperationResponse doUpdate(int entryIndex, FHIRPersistenceEvent e
     public FHIRRestOperationResponse doPatch(int entryIndex, FHIRPersistenceEvent event, Entry validationResponseEntry,
             String requestDescription, FHIRUrlParser requestURL, long accumulatedTime, String type, String id, Resource newResource,
             Resource prevResource, FHIRPatch patch, String ifMatchValue, String searchQueryString,
-            boolean skippableUpdate, List warnings, String localIdentifier) throws Exception {
+            boolean skippableUpdate, List warnings, String localIdentifier, PayloadPersistenceResponse offloadResponse) throws Exception {
         logStart(entryIndex, requestDescription, requestURL);
         // Skip PATCH if validation failed
         // TODO the logic in the old buildLocalRefMap uses SC_OK_STRING
diff --git a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorPersist.java b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorPersist.java
index 73dec757a94..ebc044063a6 100644
--- a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorPersist.java
+++ b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorPersist.java
@@ -29,6 +29,7 @@
 import com.ibm.fhir.persistence.exception.FHIRPersistenceIfNoneMatchException;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceDeletedException;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceNotFoundException;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.search.exception.FHIRSearchException;
 import com.ibm.fhir.server.exception.FHIRRestBundledRequestException;
 import com.ibm.fhir.server.spi.operation.FHIROperationContext;
@@ -143,10 +144,10 @@ public FHIRRestOperationResponse doHistory(int entryIndex, String requestDescrip
     @Override
     public FHIRRestOperationResponse doCreate(int entryIndex, FHIRPersistenceEvent event, List warnings,
             Entry validationResponseEntry, String requestDescription, FHIRUrlParser requestURL, long accumulatedTime,
-            String type, Resource resource, String ifNoneExist, String localIdentifier) throws Exception {
+            String type, Resource resource, String ifNoneExist, String localIdentifier, PayloadPersistenceResponse offloadResponse) throws Exception {
 
         doInteraction(entryIndex, requestDescription, accumulatedTime, () -> {
-            FHIRRestOperationResponse ior = helpers.doCreatePersist(event, warnings, resource);
+            FHIRRestOperationResponse ior = helpers.doCreatePersist(event, warnings, resource, offloadResponse);
 
             OperationOutcome validationOutcome = null;
             if (validationResponseEntry != null && validationResponseEntry.getResponse() != null) {
@@ -164,11 +165,11 @@ public FHIRRestOperationResponse doCreate(int entryIndex, FHIRPersistenceEvent e
     public FHIRRestOperationResponse doUpdate(int entryIndex, FHIRPersistenceEvent event, Entry validationResponseEntry,
             String requestDescription, FHIRUrlParser requestURL, long accumulatedTime, String type, String id,
             Resource newResource, Resource prevResource, String ifMatchValue, String searchQueryString,
-            boolean skippableUpdate, String localIdentifier, List warnings, boolean isDeleted, Integer ifNoneMatch) throws Exception {
+            boolean skippableUpdate, String localIdentifier, List warnings, boolean isDeleted, Integer ifNoneMatch, PayloadPersistenceResponse offloadResponse) throws Exception {
 
         doInteraction(entryIndex, requestDescription, accumulatedTime, () -> {
 
-            FHIRRestOperationResponse ior = helpers.doPatchOrUpdatePersist(event, type, id, false, newResource, prevResource, warnings, isDeleted, ifNoneMatch);
+            FHIRRestOperationResponse ior = helpers.doPatchOrUpdatePersist(event, type, id, false, newResource, prevResource, warnings, isDeleted, ifNoneMatch, offloadResponse);
             OperationOutcome validationOutcome = null;
             if (validationResponseEntry != null && validationResponseEntry.getResponse() != null) {
                 validationOutcome = validationResponseEntry.getResponse().getOutcome().as(OperationOutcome.class);
@@ -183,14 +184,14 @@ public FHIRRestOperationResponse doUpdate(int entryIndex, FHIRPersistenceEvent e
     public FHIRRestOperationResponse doPatch(int entryIndex, FHIRPersistenceEvent event, Entry validationResponseEntry,
             String requestDescription, FHIRUrlParser requestURL, long accumulatedTime, String type, String id,
             Resource newResource, Resource prevResource, FHIRPatch patch, String ifMatchValue, String searchQueryString,
-            boolean skippableUpdate, List warnings, String localIdentifier) throws Exception {
+            boolean skippableUpdate, List warnings, String localIdentifier, PayloadPersistenceResponse offloadResponse) throws Exception {
 
         // For patch, if the original resource was deleted, we'd have already thrown an error.
         // Note that the patch will have already been applied to the resource...so this is
         // really just an update as far as the persistence layer is concerned
         doInteraction(entryIndex, requestDescription, accumulatedTime, () -> {
             FHIRRestOperationResponse ior = helpers.doPatchOrUpdatePersist(event, type, id, true, newResource, prevResource,
-                warnings, false, null);
+                warnings, false, null, offloadResponse);
             OperationOutcome validationOutcome = null;
             if (validationResponseEntry != null && validationResponseEntry.getResponse() != null) {
                 validationOutcome = validationResponseEntry.getResponse().getOutcome().as(OperationOutcome.class);
diff --git a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorReferenceMapping.java b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorReferenceMapping.java
index 886ee6e6bde..265b4181214 100644
--- a/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorReferenceMapping.java
+++ b/fhir-server/src/main/java/com/ibm/fhir/server/rest/FHIRRestInteractionVisitorReferenceMapping.java
@@ -10,8 +10,8 @@
 
 import java.util.List;
 import java.util.Map;
+import java.util.UUID;
 import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
 
 import javax.ws.rs.core.MultivaluedMap;
 import javax.ws.rs.core.Response.Status;
@@ -26,7 +26,7 @@
 import com.ibm.fhir.persistence.context.FHIRPersistenceEvent;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceDeletedException;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceNotFoundException;
-import com.ibm.fhir.persistence.payload.PayloadKey;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.search.exception.FHIRSearchException;
 import com.ibm.fhir.server.exception.FHIRRestBundledRequestException;
 import com.ibm.fhir.server.spi.operation.FHIROperationContext;
@@ -81,7 +81,10 @@ public FHIRRestOperationResponse doHistory(int entryIndex, String requestDescrip
     }
 
     @Override
-    public FHIRRestOperationResponse doCreate(int entryIndex, FHIRPersistenceEvent event, List warnings, Entry validationResponseEntry, String requestDescription, FHIRUrlParser requestURL, long accumulatedTime, String type, Resource resource, String ifNoneExist, String localIdentifier) throws Exception {
+    public FHIRRestOperationResponse doCreate(int entryIndex, FHIRPersistenceEvent event, List warnings, Entry validationResponseEntry, String requestDescription, FHIRUrlParser requestURL, long accumulatedTime, String type, Resource resource, String ifNoneExist, String localIdentifier, PayloadPersistenceResponse offloadResponse) throws Exception {
+        // Note the offloadResponse will be null when passed in to this method, because
+        // we only initiate the offload in this particular visitor - the fact that we have
+        // the parameter defined is just a side-effect of the visitor pattern we're using.
 
         // Use doOperation so we can implement common exception handling in one place
         return doOperation(entryIndex, requestDescription, accumulatedTime, () -> {
@@ -92,18 +95,19 @@ public FHIRRestOperationResponse doCreate(int entryIndex, FHIRPersistenceEvent e
             final Resource finalResource = visitor.getResult(); // finalResource immutable
 
             // Try offloading storage of the payload. The offloadResponse will be null if not supported
+            String resourcePayloadKey = UUID.randomUUID().toString();
             int newVersionNumber = Integer.parseInt(finalResource.getMeta().getVersionId().getValue());
-            Future offloadResponse = storePayload(finalResource, finalResource.getId(), newVersionNumber);
+            PayloadPersistenceResponse actualOffloadResponse = storePayload(finalResource, finalResource.getId(), newVersionNumber, resourcePayloadKey);
 
             // Pass back the updated resource so it can be used in the next phase if required
-            return new FHIRRestOperationResponse(finalResource, finalResource.getId(), offloadResponse);
+            return new FHIRRestOperationResponse(finalResource, finalResource.getId(), actualOffloadResponse);
         });
     }
 
     @Override
     public FHIRRestOperationResponse doUpdate(int entryIndex, FHIRPersistenceEvent event, Entry validationResponseEntry, String requestDescription, FHIRUrlParser requestURL,
         long accumulatedTime, String type, String id, Resource resource, Resource prevResource, String ifMatchValue, String searchQueryString,
-        boolean skippableUpdate, String localIdentifier, List warnings, boolean isDeleted, Integer ifNoneMatch) throws Exception {
+        boolean skippableUpdate, String localIdentifier, List warnings, boolean isDeleted, Integer ifNoneMatch, PayloadPersistenceResponse offloadResponse) throws Exception {
 
         // Use doOperation for common exception handling
         return doOperation(entryIndex, requestDescription, accumulatedTime, () -> {
@@ -117,10 +121,13 @@ public FHIRRestOperationResponse doUpdate(int entryIndex, FHIRPersistenceEvent e
                 addLocalRefMapping(localIdentifier, newResource);
             }
 
-            // TODO support payload offload here
+            // Try offloading storage of the payload. The offloadResponse will be null if not supported
+            String resourcePayloadKey = UUID.randomUUID().toString();
+            int newVersionNumber = Integer.parseInt(newResource.getMeta().getVersionId().getValue());
+            PayloadPersistenceResponse actualOffloadResponse = storePayload(newResource, newResource.getId(), newVersionNumber, resourcePayloadKey);
 
             // Pass back the updated resource so it can be used in the next phase
-            FHIRRestOperationResponse result = new FHIRRestOperationResponse(null, null, newResource);
+            FHIRRestOperationResponse result = new FHIRRestOperationResponse(newResource, null, actualOffloadResponse);
             result.setDeleted(isDeleted);
             
             return result;
@@ -130,7 +137,7 @@ public FHIRRestOperationResponse doUpdate(int entryIndex, FHIRPersistenceEvent e
     @Override
     public FHIRRestOperationResponse doPatch(int entryIndex, FHIRPersistenceEvent event, Entry validationResponseEntry, String requestDescription, FHIRUrlParser requestURL, long accumulatedTime,
         String type, String id, Resource resource, Resource prevResource, FHIRPatch patch, String ifMatchValue, String searchQueryString,
-        boolean skippableUpdate, List warnings, String localIdentifier) throws Exception {
+        boolean skippableUpdate, List warnings, String localIdentifier, PayloadPersistenceResponse offloadResponse) throws Exception {
         // Use doOperation for common exception handling
         return doOperation(entryIndex, requestDescription, accumulatedTime, () -> {
 
@@ -143,10 +150,13 @@ public FHIRRestOperationResponse doPatch(int entryIndex, FHIRPersistenceEvent ev
                 addLocalRefMapping(localIdentifier, newResource);
             }
 
-            // TODO support payload offload here
+            // Try offloading storage of the payload. The offloadResponse will be null if not supported
+            String resourcePayloadKey = UUID.randomUUID().toString();
+            int newVersionNumber = Integer.parseInt(newResource.getMeta().getVersionId().getValue());
+            PayloadPersistenceResponse actualOffloadResponse = storePayload(newResource, newResource.getId(), newVersionNumber, resourcePayloadKey);
 
             // Pass back the updated resource so it can be used in the next phase
-            return new FHIRRestOperationResponse(null, null, newResource);
+            return new FHIRRestOperationResponse(newResource, null, actualOffloadResponse);
         });
     }
 
@@ -178,15 +188,16 @@ public FHIRRestOperationResponse issue(int entryIndex, String requestDescription
     /**
      * If payload offloading is supported by the persistence layer, store the given resource. This
      * can be an async operation which we resolve at the end just prior to the transaction being
-     * committed. If offloading isn't supported, the persistence layer returns null and the operation
-     * is a NOP.
+     * committed. If offloading isn't enabled, the operation is a NOP and the persistence layer
+     * returns null.
      * @param resource
      * @param logicalId
      * @param newVersionNumber
+     * @param resourcePayloadKey
      * @return
      */
-    protected Future storePayload(Resource resource, String logicalId, int newVersionNumber) throws Exception {
-       return helpers.storePayload(resource, logicalId, newVersionNumber);
+    protected PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey) throws Exception {
+       return helpers.storePayload(resource, logicalId, newVersionNumber, resourcePayloadKey);
     }
 
     /**
diff --git a/fhir-server/src/main/java/com/ibm/fhir/server/util/FHIRRestHelper.java b/fhir-server/src/main/java/com/ibm/fhir/server/util/FHIRRestHelper.java
index be8973730a8..4ca3604dd58 100644
--- a/fhir-server/src/main/java/com/ibm/fhir/server/util/FHIRRestHelper.java
+++ b/fhir-server/src/main/java/com/ibm/fhir/server/util/FHIRRestHelper.java
@@ -28,7 +28,6 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.Future;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 import java.util.stream.Collectors;
@@ -89,6 +88,7 @@
 import com.ibm.fhir.path.evaluator.FHIRPathEvaluator.EvaluationContext;
 import com.ibm.fhir.path.exception.FHIRPathException;
 import com.ibm.fhir.persistence.FHIRPersistence;
+import com.ibm.fhir.persistence.FHIRPersistenceSupport;
 import com.ibm.fhir.persistence.FHIRPersistenceTransaction;
 import com.ibm.fhir.persistence.HistorySortOrder;
 import com.ibm.fhir.persistence.InteractionStatus;
@@ -100,6 +100,7 @@
 import com.ibm.fhir.persistence.context.FHIRPersistenceContextFactory;
 import com.ibm.fhir.persistence.context.FHIRPersistenceEvent;
 import com.ibm.fhir.persistence.context.FHIRSystemHistoryContext;
+import com.ibm.fhir.persistence.context.impl.FHIRPersistenceContextImpl;
 import com.ibm.fhir.persistence.erase.EraseDTO;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceIfNoneMatchException;
@@ -107,8 +108,7 @@
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceNotFoundException;
 import com.ibm.fhir.persistence.helper.FHIRTransactionHelper;
 import com.ibm.fhir.persistence.jdbc.exception.FHIRPersistenceDataAccessException;
-import com.ibm.fhir.persistence.payload.PayloadKey;
-import com.ibm.fhir.persistence.payload.PayloadPersistenceHelper;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.persistence.util.FHIRPersistenceUtil;
 import com.ibm.fhir.profile.ProfileSupport;
 import com.ibm.fhir.search.SearchConstants;
@@ -149,6 +149,11 @@ public class FHIRRestHelper implements FHIRResourceHelpers {
     private static final com.ibm.fhir.model.type.String SC_BAD_REQUEST_STRING = string(Integer.toString(SC_BAD_REQUEST));
     private static final com.ibm.fhir.model.type.String SC_ACCEPTED_STRING = string(Integer.toString(SC_ACCEPTED));
     private static final ZoneId UTC = ZoneId.of("UTC");
+    
+    // Convenience constants to make call parameters more readable
+    private static final boolean THROW_EXC_ON_NULL = true;
+    private static final boolean INCLUDE_DELETED = true;
+    private static final boolean CHECK_INTERACTION_ALLOWED = true;
 
     // default number of entries in system history if no _count is given
     private static final int DEFAULT_HISTORY_ENTRIES = 100;
@@ -207,18 +212,10 @@ public FHIRRestOperationResponse doCreate(String type, Resource resource, String
                 // Persistence event processing may modify the resource, so make sure we have the latest value
                 resource = event.getFhirResource();
 
-                int newVersionNumber = Integer.parseInt(resource.getMeta().getVersionId().getValue());
-                Future offloadResponse = storePayload(resource, resource.getId(), newVersionNumber);
-
-                // Resolve the future so that we know the payload has been stored
-                // TODO tie this into the transaction data so that we can clean up
-                // more if there's a rollback for another reason.
-                PayloadKey payloadKey = offloadResponse != null ? offloadResponse.get() : null;
-                if (payloadKey == null || payloadKey.getStatus() == PayloadKey.Status.OK) {
-                    response = doCreatePersist(event, warnings, resource);
-                } else {
-                    throw new FHIRPersistenceException("Payload offload failure. Check server logs for details.");
-                }
+                final String resourcePayloadKey = UUID.randomUUID().toString();
+                int newVersionNumber = FHIRPersistenceSupport.getMetaVersionId(resource);
+                PayloadPersistenceResponse offloadResponse = storePayload(resource, resource.getId(), newVersionNumber, resourcePayloadKey);
+                response = doCreatePersist(event, warnings, resource, offloadResponse);
             }
 
             // At this point, we can be sure the transaction must have been started, so always commit
@@ -336,7 +333,7 @@ public FHIRRestOperationResponse doCreateMeta(FHIRPersistenceEvent event, List warnings, Resource resource) throws Exception {
+    public FHIRRestOperationResponse doCreatePersist(FHIRPersistenceEvent event, List warnings, Resource resource, PayloadPersistenceResponse offloadResponse) throws Exception {
         log.entering(this.getClass().getName(), "doCreatePersist");
 
         FHIRRestOperationResponse ior = new FHIRRestOperationResponse();
@@ -353,7 +350,10 @@ public FHIRRestOperationResponse doCreatePersist(FHIRPersistenceEvent event, Lis
             checkIdAndMeta(resource);
 
             // create the resource and return the location header.
-            final FHIRPersistenceContext persistenceContext = FHIRPersistenceContextFactory.createPersistenceContext(event);
+            final FHIRPersistenceContext persistenceContext = 
+                    FHIRPersistenceContextImpl.builder(event)
+                    .withOffloadResponse(offloadResponse)
+                    .build();
 
             // For 1869 bundle processing, the resource is updated first and is no longer mutated by the
             // persistence layer.
@@ -450,10 +450,15 @@ private FHIRRestOperationResponse doPatchOrUpdate(String type, String id, FHIRPa
                 return metaResponse;
             }
 
+            // Store the payload if we're offloading
+            final String resourcePayloadKey = UUID.randomUUID().toString();
+            int newVersionNumber = FHIRPersistenceSupport.getMetaVersionId(metaResponse.getResource());
+            PayloadPersistenceResponse offloadResponse = storePayload(metaResponse.getResource(), metaResponse.getResource().getId(), newVersionNumber, resourcePayloadKey);
+
             // Persist the resource
             FHIRRestOperationResponse ior = doPatchOrUpdatePersist(event, type, id, patch != null,
                     metaResponse.getResource(), metaResponse.getPrevResource(), warnings, metaResponse.isDeleted(),
-                    ifNoneMatch);
+                    ifNoneMatch, offloadResponse);
 
             txn.commit();
             txn = null;
@@ -704,7 +709,7 @@ public FHIRRestOperationResponse doUpdateMeta(FHIRPersistenceEvent event, String
             // again under a database lock during the persistence phase and the request will be rejected if there's
             // a mismatch (can happen when there are concurrent updates).
             final com.ibm.fhir.model.type.Instant lastUpdated = com.ibm.fhir.model.type.Instant.now(ZoneOffset.UTC);
-            final int newVersionNumber = updateCreate ? 1 : Integer.parseInt(ior.getPrevResource().getMeta().getVersionId().getValue()) + 1;
+            final int newVersionNumber = updateCreate ? 1 : FHIRPersistenceSupport.getMetaVersionId(ior.getPrevResource()) + 1;
             newResource = FHIRPersistenceUtil.copyAndSetResourceMetaFields(newResource, newResource.getId(), newVersionNumber, lastUpdated);
 
             ior.setResource(newResource);
@@ -723,7 +728,7 @@ public FHIRRestOperationResponse doUpdateMeta(FHIRPersistenceEvent event, String
     @Override
     public FHIRRestOperationResponse doPatchOrUpdatePersist(FHIRPersistenceEvent event, String type, String id,
             boolean isPatch, Resource newResource, Resource prevResource,
-            List warnings, boolean isDeleted, Integer ifNoneMatch) throws Exception {
+            List warnings, boolean isDeleted, Integer ifNoneMatch, PayloadPersistenceResponse offloadResponse) throws Exception {
         log.entering(this.getClass().getName(), "doPatchOrUpdatePersist");
 
         // We'll only start a new transaction here if we don't have one. We'll only
@@ -745,7 +750,11 @@ public FHIRRestOperationResponse doPatchOrUpdatePersist(FHIRPersistenceEvent eve
             checkIdAndMeta(newResource);
 
             FHIRPersistenceContext persistenceContext =
-                    FHIRPersistenceContextFactory.createPersistenceContext(event, ifNoneMatch);
+                    FHIRPersistenceContextImpl.builder(event)
+                    .withIfNoneMatch(ifNoneMatch)
+                    .withOffloadResponse(offloadResponse)
+                    .build();
+            
             boolean createOnUpdate = (prevResource == null);
             final SingleResourceResult result;
             if (createOnUpdate) {
@@ -948,7 +957,7 @@ public FHIRRestOperationResponse doDelete(String type, String id, String searchQ
 
                 // Read the resource so it will be available to the beforeDelete interceptor methods.
                 try {
-                    resourceToDelete = doRead(type, id, false, false, null, null, false).getResource();
+                    resourceToDelete = doRead(type, id, !THROW_EXC_ON_NULL, !INCLUDE_DELETED, null, null, !CHECK_INTERACTION_ALLOWED).getResource();
                     if (resourceToDelete != null) {
                         responseBundle = Bundle.builder().type(BundleType.SEARCHSET)
                                 .id(UUID.randomUUID().toString())
@@ -961,7 +970,7 @@ public FHIRRestOperationResponse doDelete(String type, String id, String searchQ
                     }
                 } catch (FHIRPersistenceResourceDeletedException e) {
                     // Absorb this exception.
-                    ior.setResource(doRead(type, id, false, true, null, null, false).getResource());
+                    ior.setResource(doRead(type, id, !THROW_EXC_ON_NULL, INCLUDE_DELETED, null, null, !CHECK_INTERACTION_ALLOWED).getResource());
                     warnings.add(buildOperationOutcomeIssue(IssueSeverity.WARNING, IssueType.DELETED, "Resource of type '"
                         + type + "' with id '" + id + "' is already deleted."));
                 }
@@ -977,22 +986,29 @@ public FHIRRestOperationResponse doDelete(String type, String id, String searchQ
                             new FHIRPersistenceEvent(null, buildPersistenceEventProperties(type, id, null, null));
                     event.setFhirResource(resourceToDelete);
                     getInterceptorMgr().fireBeforeDeleteEvent(event);
-
+                    
+                    // For soft-delete we store a new version of the resource with the deleted
+                    // flag set. Update the resource meta so that it has the correct version id
+                    final String resourcePayloadKey = UUID.randomUUID().toString();
+                    final com.ibm.fhir.model.type.Instant lastUpdated = com.ibm.fhir.model.type.Instant.now(ZoneOffset.UTC);
+                    final int newVersionNumber = FHIRPersistenceSupport.getMetaVersionId(resourceToDelete) + 1;
+                    final Resource resource = FHIRPersistenceUtil.copyAndSetResourceMetaFields(resourceToDelete, id, newVersionNumber, lastUpdated);
+
+                    // If we're offloading, the payload gets stored outside the RDBMS
+                    PayloadPersistenceResponse offloadResponse = storePayload(resource, resource.getId(), newVersionNumber, resourcePayloadKey);
                     FHIRPersistenceContext persistenceContext =
-                            FHIRPersistenceContextFactory.createPersistenceContext(event);
+                            FHIRPersistenceContextImpl.builder(event)
+                            .withOffloadResponse(offloadResponse)
+                            .build();
 
-                    SingleResourceResult result = persistence.delete(persistenceContext, resourceType, id);
-                    if (result.getOutcome() != null) {
-                        warnings.addAll(result.getOutcome().getIssue());
-                    }
-                    Resource resource = result.getResource();
-                    event.setFhirResource(resource);
+                    persistence.deleteWithMeta(persistenceContext, resource);
 
                     if (responseBundle.getEntry().size() == 1) {
                         ior.setResource(resource);
                     }
 
                     // Invoke the 'afterDelete' interceptor methods.
+                    event.setFhirResource(resource);
                     getInterceptorMgr().fireAfterDeleteEvent(event);
                 }
 
@@ -3260,13 +3276,13 @@ public String generateResourceId() {
      * @return current time in UTC
      */
     protected com.ibm.fhir.model.type.Instant getCurrentInstant() {
-        return PayloadPersistenceHelper.getCurrentInstant();
+        return FHIRPersistenceSupport.getCurrentInstant();
     }
 
     @Override
-    public Future storePayload(Resource resource, String logicalId, int newVersionNumber) throws Exception {
+    public PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey) throws Exception {
 
         // Delegate to the persistence layer. Result will be null if offloading is not supported
-        return persistence.storePayload(resource, logicalId, newVersionNumber);
+        return persistence.storePayload(resource, logicalId, newVersionNumber, resourcePayloadKey);
     }
-}
+}
\ No newline at end of file
diff --git a/fhir-server/src/test/java/com/ibm/fhir/server/test/MockPersistenceImpl.java b/fhir-server/src/test/java/com/ibm/fhir/server/test/MockPersistenceImpl.java
index 8b8cc778b33..37efd53c3e3 100644
--- a/fhir-server/src/test/java/com/ibm/fhir/server/test/MockPersistenceImpl.java
+++ b/fhir-server/src/test/java/com/ibm/fhir/server/test/MockPersistenceImpl.java
@@ -7,7 +7,6 @@
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.Future;
 import java.util.function.Function;
 
 import com.ibm.fhir.model.resource.OperationOutcome;
@@ -28,7 +27,7 @@
 import com.ibm.fhir.persistence.context.FHIRPersistenceContext;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceDeletedException;
-import com.ibm.fhir.persistence.payload.PayloadKey;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.server.util.FHIRRestHelperTest;
 
 /**
@@ -165,6 +164,12 @@ public  SingleResourceResult delete(FHIRPersistenceContex
         return resultBuilder.build();
     }
 
+    @SuppressWarnings("unchecked")
+    @Override
+    public  void deleteWithMeta(FHIRPersistenceContext context, T resource) throws FHIRPersistenceException {
+        // NOP. No need to do anything in this very simple mock
+    }
+
     @Override
     public ResourcePayload fetchResourcePayloads(Class resourceType, java.time.Instant fromLastModified,
         java.time.Instant toLastModified, Function process) throws FHIRPersistenceException {
@@ -194,7 +199,7 @@ public List retrieveIndex(int count, java.time.Instant notModifiedAfter, L
     }
 
     @Override
-    public Future storePayload(Resource resource, String logicalId, int newVersionNumber) throws FHIRPersistenceException {
+    public PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey) throws FHIRPersistenceException {
         // NOP
         return null;
     }
diff --git a/fhir-server/src/test/java/com/ibm/fhir/server/test/ServerResolveFunctionTest.java b/fhir-server/src/test/java/com/ibm/fhir/server/test/ServerResolveFunctionTest.java
index cce84ea52f0..0b706c7c473 100644
--- a/fhir-server/src/test/java/com/ibm/fhir/server/test/ServerResolveFunctionTest.java
+++ b/fhir-server/src/test/java/com/ibm/fhir/server/test/ServerResolveFunctionTest.java
@@ -21,7 +21,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
-import java.util.concurrent.Future;
 import java.util.function.Function;
 
 import org.testng.annotations.BeforeClass;
@@ -57,7 +56,7 @@
 import com.ibm.fhir.persistence.context.FHIRPersistenceContextFactory;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
 import com.ibm.fhir.persistence.helper.PersistenceHelper;
-import com.ibm.fhir.persistence.payload.PayloadKey;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.server.resolve.ServerResolveFunction;
 
 public class ServerResolveFunctionTest {
@@ -474,7 +473,8 @@ public List retrieveIndex(int count, java.time.Instant notModifiedAfter, L
         }
 
         @Override
-        public Future storePayload(Resource resource, String logicalId, int newVersionNumber) throws FHIRPersistenceException {
+        public PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey) throws FHIRPersistenceException {
+            // NOP. No need to do anything in this mock implementation
             return null;
         }
 
diff --git a/fhir-smart/src/test/java/com/ibm/fhir/smart/test/MockPersistenceImpl.java b/fhir-smart/src/test/java/com/ibm/fhir/smart/test/MockPersistenceImpl.java
index 48a9802692f..a94fd8f501a 100644
--- a/fhir-smart/src/test/java/com/ibm/fhir/smart/test/MockPersistenceImpl.java
+++ b/fhir-smart/src/test/java/com/ibm/fhir/smart/test/MockPersistenceImpl.java
@@ -10,7 +10,6 @@
 
 import java.time.Instant;
 import java.util.List;
-import java.util.concurrent.Future;
 import java.util.function.Function;
 
 import com.ibm.fhir.model.resource.Encounter;
@@ -32,7 +31,7 @@
 import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceDeletedException;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceResourceNotFoundException;
-import com.ibm.fhir.persistence.payload.PayloadKey;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 
 /**
  * Mock implementation of FHIRPersistence for use during testing.
@@ -205,8 +204,8 @@ public List retrieveIndex(int count, java.time.Instant notModifiedAfter, L
     }
 
     @Override
-    public Future storePayload(Resource resource, String logicalId, int newVersionNumber)
-        throws FHIRPersistenceException {
+    public PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey)
+            throws FHIRPersistenceException {
         return null;
     }
 
diff --git a/operation/fhir-operation-erase/src/test/java/com/ibm/fhir/operation/erase/mock/MockFHIRResourceHelpers.java b/operation/fhir-operation-erase/src/test/java/com/ibm/fhir/operation/erase/mock/MockFHIRResourceHelpers.java
index c0f0d809812..c00fc962501 100644
--- a/operation/fhir-operation-erase/src/test/java/com/ibm/fhir/operation/erase/mock/MockFHIRResourceHelpers.java
+++ b/operation/fhir-operation-erase/src/test/java/com/ibm/fhir/operation/erase/mock/MockFHIRResourceHelpers.java
@@ -11,7 +11,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
-import java.util.concurrent.Future;
 
 import javax.ws.rs.core.MultivaluedMap;
 
@@ -29,7 +28,7 @@
 import com.ibm.fhir.persistence.context.FHIRPersistenceEvent;
 import com.ibm.fhir.persistence.erase.EraseDTO;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
-import com.ibm.fhir.persistence.payload.PayloadKey;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.search.context.FHIRSearchContext;
 import com.ibm.fhir.server.spi.operation.FHIROperationContext;
 import com.ibm.fhir.server.spi.operation.FHIROperationUtil;
@@ -188,7 +187,7 @@ public String generateResourceId() {
     }
 
     @Override
-    public Future storePayload(Resource resource, String logicalId, int newVersionNumber) {
+    public PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey) {
         return null;
     }
 
@@ -203,7 +202,7 @@ public FHIRRestOperationResponse doCreateMeta(FHIRPersistenceEvent event, List warnings, Resource resource) throws Exception {
+    public FHIRRestOperationResponse doCreatePersist(FHIRPersistenceEvent event, List warnings, Resource resource, PayloadPersistenceResponse offloadResponse) throws Exception {
         return null;
     }
 
@@ -215,7 +214,7 @@ public FHIRRestOperationResponse doUpdateMeta(FHIRPersistenceEvent event, String
 
     @Override
     public FHIRRestOperationResponse doPatchOrUpdatePersist(FHIRPersistenceEvent event, String type, String id, boolean isPatch,
-        Resource newResource, Resource prevResource, List warnings, boolean isDeleted, Integer ifNoneMatch) throws Exception {
+        Resource newResource, Resource prevResource, List warnings, boolean isDeleted, Integer ifNoneMatch, PayloadPersistenceResponse offloadResponse) throws Exception {
         return null;
     }
 
diff --git a/operation/fhir-operation-member-match/src/test/java/com/ibm/fhir/operation/davinci/hrex/test/MemberMatchTest.java b/operation/fhir-operation-member-match/src/test/java/com/ibm/fhir/operation/davinci/hrex/test/MemberMatchTest.java
index 1b53f585428..3cbee74ccca 100644
--- a/operation/fhir-operation-member-match/src/test/java/com/ibm/fhir/operation/davinci/hrex/test/MemberMatchTest.java
+++ b/operation/fhir-operation-member-match/src/test/java/com/ibm/fhir/operation/davinci/hrex/test/MemberMatchTest.java
@@ -20,7 +20,6 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.Future;
 
 import javax.ws.rs.core.MultivaluedMap;
 import javax.ws.rs.core.Response;
@@ -82,7 +81,7 @@
 import com.ibm.fhir.persistence.SingleResourceResult;
 import com.ibm.fhir.persistence.context.FHIRPersistenceEvent;
 import com.ibm.fhir.persistence.exception.FHIRPersistenceException;
-import com.ibm.fhir.persistence.payload.PayloadKey;
+import com.ibm.fhir.persistence.payload.PayloadPersistenceResponse;
 import com.ibm.fhir.search.context.FHIRSearchContext;
 import com.ibm.fhir.server.spi.operation.FHIROperationContext;
 import com.ibm.fhir.server.spi.operation.FHIRResourceHelpers;
@@ -1725,7 +1724,7 @@ public FHIRRestOperationResponse doCreateMeta(FHIRPersistenceEvent event, List warnings, Resource resource) throws Exception {
+        public FHIRRestOperationResponse doCreatePersist(FHIRPersistenceEvent event, List warnings, Resource resource, PayloadPersistenceResponse offloadResponse) throws Exception {
             throw new AssertionError("Unused");
         }
 
@@ -1737,7 +1736,7 @@ public FHIRRestOperationResponse doUpdateMeta(FHIRPersistenceEvent event, String
 
         @Override
         public FHIRRestOperationResponse doPatchOrUpdatePersist(FHIRPersistenceEvent event, String type, String id, boolean isPatch, Resource newResource,
-            Resource prevResource, List warnings, boolean isDeleted, Integer ifNoneMatch) throws Exception {
+            Resource prevResource, List warnings, boolean isDeleted, Integer ifNoneMatch, PayloadPersistenceResponse offloadResponse) throws Exception {
             throw new AssertionError("Unused");
         }
 
@@ -1753,7 +1752,7 @@ public String generateResourceId() {
         }
 
         @Override
-        public Future storePayload(Resource resource, String logicalId, int newVersionNumber) throws Exception {
+        public PayloadPersistenceResponse storePayload(Resource resource, String logicalId, int newVersionNumber, String resourcePayloadKey) throws Exception {
             throw new AssertionError("Unused");
         }