From 9338e809431d989548131471fa13bb9b33efac78 Mon Sep 17 00:00:00 2001 From: frcroth Date: Mon, 20 Mar 2023 16:19:06 +0100 Subject: [PATCH] Introduce data vault as storage backend abstraction (#6899) --- .../binary/credential/CredentialService.scala | 8 +- .../explore/ExploreRemoteLayerService.scala | 5 +- .../binary/explore/RemoteLayerExplorer.scala | 6 +- test/backend/DataVaultTestSuite.scala | 122 ++++ test/backend/RangeRequestTestSuite.scala | 47 -- .../dataformats/BucketProvider.scala | 15 +- .../datastore/dataformats/MagLocator.scala | 4 +- .../dataformats/n5/N5BucketProvider.scala | 4 +- .../PrecomputedBucketProvider.scala | 4 +- .../dataformats/zarr/ZarrBucketProvider.scala | 4 +- .../datastore/datareaders/ChunkReader.scala | 9 +- .../datastore/datareaders/DatasetArray.scala | 7 +- .../datareaders/FileSystemStore.scala | 37 - .../datastore/datareaders/n5/N5Array.scala | 18 +- .../datareaders/n5/N5ChunkReader.scala | 13 +- .../precomputed/PrecomputedArray.scala | 24 +- .../precomputed/PrecomputedChunkReader.scala | 11 +- .../datareaders/zarr/ZarrArray.scala | 16 +- .../datastore/datavault/DataVault.scala | 7 + .../datavault/FileSystemDataVault.scala | 10 + .../datavault/FileSystemVaultPath.scala | 69 ++ .../datavault/GoogleCloudDataVault.scala | 54 ++ .../datastore/datavault/HttpsDataVault.scala | 101 +++ .../datastore/datavault/S3DataVault.scala | 113 +++ .../datastore/datavault/VaultPath.scala | 81 +++ .../datastore/s3fs/AmazonS3Factory.java | 125 ---- .../webknossos/datastore/s3fs/LICENSE | 31 - .../datastore/s3fs/S3AccessControlList.java | 64 -- .../datastore/s3fs/S3FileChannel.java | 168 ----- .../datastore/s3fs/S3FileStore.java | 146 ---- .../s3fs/S3FileStoreAttributeView.java | 47 -- .../datastore/s3fs/S3FileSystem.java | 184 ----- .../S3FileSystemConfigurationException.java | 9 - .../datastore/s3fs/S3FileSystemProvider.java | 669 ------------------ .../webknossos/datastore/s3fs/S3Iterator.java | 192 ----- .../webknossos/datastore/s3fs/S3Path.java | 614 ---------------- .../s3fs/S3ReadOnlySeekableByteChannel.java | 158 ----- .../datastore/s3fs/S3SeekableByteChannel.java | 145 ---- .../attribute/S3BasicFileAttributeView.java | 33 - .../s3fs/attribute/S3BasicFileAttributes.java | 85 --- .../attribute/S3PosixFileAttributeView.java | 61 -- .../s3fs/attribute/S3PosixFileAttributes.java | 37 - .../s3fs/attribute/S3UserPrincipal.java | 17 - .../util/AnonymousAWSCredentialsProvider.java | 18 - .../datastore/s3fs/util/AttributesUtils.java | 100 --- .../webknossos/datastore/s3fs/util/Cache.java | 29 - .../datastore/s3fs/util/IOUtils.java | 32 - .../datastore/s3fs/util/S3Utils.java | 198 ------ .../datastore/storage/DataVaultsHolder.scala | 56 ++ .../datastore/storage/FileSystemService.scala | 8 +- .../datastore/storage/FileSystemsHolder.scala | 100 --- .../httpsfilesystem/ByteChannelOptions.java | 10 - .../httpsfilesystem/HttpsFileSystem.scala | 58 -- .../HttpsFileSystemProvider.scala | 65 -- .../storage/httpsfilesystem/HttpsPath.scala | 73 -- .../HttpsSeekableByteChannel.scala | 108 --- 56 files changed, 689 insertions(+), 3740 deletions(-) create mode 100644 test/backend/DataVaultTestSuite.scala delete mode 100644 test/backend/RangeRequestTestSuite.scala delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/FileSystemStore.scala create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/DataVault.scala create mode 100644 
webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemDataVault.scala create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemVaultPath.scala create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/GoogleCloudDataVault.scala create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/HttpsDataVault.scala create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/S3DataVault.scala create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/VaultPath.scala delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/AmazonS3Factory.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/LICENSE delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3AccessControlList.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileChannel.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStore.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStoreAttributeView.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystem.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemConfigurationException.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Iterator.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Path.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3ReadOnlySeekableByteChannel.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributeView.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributes.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributeView.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributes.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3UserPrincipal.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AnonymousAWSCredentialsProvider.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AttributesUtils.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/Cache.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/IOUtils.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/S3Utils.java create mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/DataVaultsHolder.scala delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala delete mode 100644 
webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/ByteChannelOptions.java delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsFileSystem.scala delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsFileSystemProvider.scala delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsPath.scala delete mode 100644 webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsSeekableByteChannel.scala diff --git a/app/models/binary/credential/CredentialService.scala b/app/models/binary/credential/CredentialService.scala index 0d4ebd64908..fdb2d1daec7 100644 --- a/app/models/binary/credential/CredentialService.scala +++ b/app/models/binary/credential/CredentialService.scala @@ -3,7 +3,7 @@ package models.binary.credential import com.scalableminds.util.tools.Fox import com.scalableminds.webknossos.datastore.storage.{ FileSystemCredential, - FileSystemsHolder, + DataVaultsHolder, GoogleServiceAccountCredential, HttpBasicAuthCredential, S3AccessKeyCredential @@ -24,7 +24,7 @@ class CredentialService @Inject()(credentialDAO: CredentialDAO) { userId: ObjectId, organizationId: ObjectId): Option[FileSystemCredential] = uri.getScheme match { - case FileSystemsHolder.schemeHttps | FileSystemsHolder.schemeHttp => + case DataVaultsHolder.schemeHttps | DataVaultsHolder.schemeHttp => credentialIdentifier.map( username => HttpBasicAuthCredential(uri.toString, @@ -32,13 +32,13 @@ class CredentialService @Inject()(credentialDAO: CredentialDAO) { credentialSecret.getOrElse(""), userId.toString, organizationId.toString)) - case FileSystemsHolder.schemeS3 => + case DataVaultsHolder.schemeS3 => (credentialIdentifier, credentialSecret) match { case (Some(keyId), Some(secretKey)) => Some(S3AccessKeyCredential(uri.toString, keyId, secretKey, userId.toString, organizationId.toString)) case _ => None } - case FileSystemsHolder.schemeGS => + case DataVaultsHolder.schemeGS => for { secret <- credentialSecret secretJson <- tryo(Json.parse(secret)).toOption diff --git a/app/models/binary/explore/ExploreRemoteLayerService.scala b/app/models/binary/explore/ExploreRemoteLayerService.scala index b0123714369..52ec5f0a6a7 100644 --- a/app/models/binary/explore/ExploreRemoteLayerService.scala +++ b/app/models/binary/explore/ExploreRemoteLayerService.scala @@ -11,7 +11,7 @@ import com.scalableminds.webknossos.datastore.dataformats.zarr._ import com.scalableminds.webknossos.datastore.datareaders.n5.N5Header import com.scalableminds.webknossos.datastore.datareaders.zarr._ import com.scalableminds.webknossos.datastore.models.datasource._ -import com.scalableminds.webknossos.datastore.storage.{FileSystemsHolder, RemoteSourceDescriptor} +import com.scalableminds.webknossos.datastore.storage.{DataVaultsHolder, RemoteSourceDescriptor} import com.typesafe.scalalogging.LazyLogging import models.binary.credential.CredentialService import models.user.User @@ -163,8 +163,7 @@ class ExploreRemoteLayerService @Inject()(credentialService: CredentialService) requestingUser._organization) remoteSource = RemoteSourceDescriptor(uri, credentialOpt) credentialId <- Fox.runOptional(credentialOpt)(c => credentialService.insertOne(c)) ?~> "remoteFileSystem.credential.insert.failed" - fileSystem <- FileSystemsHolder.getOrCreate(remoteSource) ?~> "remoteFileSystem.setup.failed" - remotePath <- 
tryo(fileSystem.getPath(FileSystemsHolder.pathFromUri(remoteSource.uri))) ?~> "remoteFileSystem.getPath.failed" + remotePath <- DataVaultsHolder.getVaultPath(remoteSource) ?~> "remoteFileSystem.setup.failed" layersWithVoxelSizes <- exploreRemoteLayersForRemotePath( remotePath, credentialId.map(_.toString), diff --git a/app/models/binary/explore/RemoteLayerExplorer.scala b/app/models/binary/explore/RemoteLayerExplorer.scala index 39fd1c164f8..123cfbf9595 100644 --- a/app/models/binary/explore/RemoteLayerExplorer.scala +++ b/app/models/binary/explore/RemoteLayerExplorer.scala @@ -5,6 +5,7 @@ import com.scalableminds.util.io.ZipIO import com.scalableminds.util.tools.{Fox, FoxImplicits, JsonHelper} import com.scalableminds.webknossos.datastore.dataformats.MagLocator import com.scalableminds.webknossos.datastore.models.datasource.{DataLayer, ElementClass} +import com.scalableminds.webknossos.datastore.datavault.VaultPath import net.liftweb.util.Helpers.tryo import play.api.libs.json.Reads @@ -25,7 +26,10 @@ trait RemoteLayerExplorer extends FoxImplicits { protected def parseJsonFromPath[T: Reads](path: Path): Fox[T] = for { - fileBytes <- tryo(ZipIO.tryGunzip(Files.readAllBytes(path))) ?~> "dataSet.explore.failed.readFile" + fileBytes <- path match { + case path: VaultPath => path.readBytes() ?~> "dataSet.explore.failed.readFile" + case _ => tryo(ZipIO.tryGunzip(Files.readAllBytes(path))) ?~> "dataSet.explore.failed.readFile" + } fileAsString <- tryo(new String(fileBytes, StandardCharsets.UTF_8)).toFox ?~> "dataSet.explore.failed.readFile" parsed <- JsonHelper.parseAndValidateJson[T](fileAsString) } yield parsed diff --git a/test/backend/DataVaultTestSuite.scala b/test/backend/DataVaultTestSuite.scala new file mode 100644 index 00000000000..97ad58685cf --- /dev/null +++ b/test/backend/DataVaultTestSuite.scala @@ -0,0 +1,122 @@ +package backend + +import org.scalatestplus.play.PlaySpec + +import java.net.URI +import com.scalableminds.webknossos.datastore.datavault.{DataVault, GoogleCloudDataVault, HttpsDataVault, VaultPath} +import com.scalableminds.webknossos.datastore.storage.RemoteSourceDescriptor + +import scala.collection.immutable.NumericRange + +class DataVaultTestSuite extends PlaySpec { + + "Data vault" when { + "using Range requests" when { + val range: NumericRange[Long] = Range.Long(0, 1024, 1) + val dataKey = "32_32_40/15360-15424_8384-8448_3520-3584" // when accessed via range request, the response body is 1024 bytes long, otherwise 124.8 KB + + "with HTTP Vault" should { + "return correct response" in { + val uri = new URI("http://storage.googleapis.com/") + val vaultPath = new VaultPath(uri, HttpsDataVault.create(RemoteSourceDescriptor(uri, None))) + val bytes = + (vaultPath / s"neuroglancer-fafb-data/fafb_v14/fafb_v14_orig/$dataKey").readBytes(Some(range)).get + + assert(bytes.length == range.length) + assert(bytes.take(10).sameElements(Array(-1, -40, -1, -32, 0, 16, 74, 70, 73, 70))) + } + } + + "with Google Cloud Storage Vault" should { + "return correct response" in { + val uri = new URI("gs://neuroglancer-fafb-data/fafb_v14/fafb_v14_orig") + val vaultPath = new VaultPath(uri, GoogleCloudDataVault.create(RemoteSourceDescriptor(uri, None))) + val bytes = (vaultPath / dataKey).readBytes(Some(range)).get + + assert(bytes.length == range.length) + assert(bytes.take(10).sameElements(Array(-1, -40, -1, -32, 0, 16, 74, 70, 73, 70))) + } + } + } + "using regular requests" when { + val dataKey = "32_32_40/15360-15424_8384-8448_3520-3584" + val dataLength = 127808 + + "with HTTP Vault" 
should { + "return correct response" in { + val uri = new URI("http://storage.googleapis.com/") + val vaultPath = new VaultPath(uri, HttpsDataVault.create(RemoteSourceDescriptor(uri, None))) + val bytes = (vaultPath / s"neuroglancer-fafb-data/fafb_v14/fafb_v14_orig/$dataKey").readBytes().get + + assert(bytes.length == dataLength) + assert(bytes.take(10).sameElements(Array(-1, -40, -1, -32, 0, 16, 74, 70, 73, 70))) + } + } + + "with Google Cloud Storage Vault" should { + "return correct response" in { + val uri = new URI("gs://neuroglancer-fafb-data/fafb_v14/fafb_v14_orig") + val vaultPath = new VaultPath(uri, GoogleCloudDataVault.create(RemoteSourceDescriptor(uri, None))) + val bytes = (vaultPath / dataKey).readBytes().get + + assert(bytes.length == dataLength) + assert(bytes.take(10).sameElements(Array(-1, -40, -1, -32, 0, 16, 74, 70, 73, 70))) + } + } + } + + "using vault path" when { + class MockDataVault extends DataVault { + override def readBytes(path: VaultPath, range: Option[NumericRange[Long]]): Array[Byte] = ??? + } + + "Uri has no trailing slash" should { + val someUri = new URI("protocol://host/a/b") + val somePath = new VaultPath(someUri, new MockDataVault) + + "resolve child" in { + val childPath = somePath / "c" + assert(childPath.toUri.toString == s"${someUri.toString}/c") + } + + "get parent" in { + assert((somePath / "..").toString == "protocol://host/a/") + } + + "get directory" in { + assert((somePath / ".").toString == s"${someUri.toString}/") + } + + "handle sequential parameters" in { + assert((somePath / "c" / "d" / "e").toString == "protocol://host/a/b/c/d/e") + } + + "resolve relative to host with starting slash in parameter" in { + assert((somePath / "/x").toString == "protocol://host/x") + } + + "resolving path respects trailing slash" in { + assert((somePath / "x/").toString == "protocol://host/a/b/x/") + assert((somePath / "x").toString == "protocol://host/a/b/x") + } + } + "Uri has trailing slash" should { + val trailingSlashUri = new URI("protocol://host/a/b/") + val trailingSlashPath = new VaultPath(trailingSlashUri, new MockDataVault) + "resolve child" in { + val childPath = trailingSlashPath / "c" + assert(childPath.toUri.toString == s"${trailingSlashUri.toString}c") + } + + "get parent" in { + assert((trailingSlashPath / "..").toString == "protocol://host/a/") + } + + "get directory" in { + assert((trailingSlashPath / ".").toString == s"${trailingSlashUri.toString}") + } + } + + } + } +} diff --git a/test/backend/RangeRequestTestSuite.scala b/test/backend/RangeRequestTestSuite.scala deleted file mode 100644 index 526f2eff633..00000000000 --- a/test/backend/RangeRequestTestSuite.scala +++ /dev/null @@ -1,47 +0,0 @@ -package backend - -import com.google.cloud.storage.StorageOptions -import com.google.cloud.storage.contrib.nio.{CloudStorageConfiguration, CloudStorageFileSystem} -import com.scalableminds.webknossos.datastore.storage.httpsfilesystem.{HttpsFileSystem, HttpsPath} -import org.scalatestplus.play.PlaySpec - -import java.net.URI -import com.scalableminds.webknossos.datastore.datareaders.FileSystemStore - -class RangeRequestTestSuite extends PlaySpec { - - "Range requests" when { - val range = 0 to 1023 - val dataKey = "32_32_40/15360-15424_8384-8448_3520-3584" // when accessed via range request, the response body is 1024 bytes long, otherwise 124.8 KB - - "with HTTP FileSystem" should { - "return correct response" in { - val uri = new URI("http://storage.googleapis.com/") - val fileSystem = HttpsFileSystem.forUri(uri) - val httpsPath = new 
HttpsPath(uri, fileSystem) - val store = new FileSystemStore(httpsPath) - val bytes = store - .readByteRange(s"/neuroglancer-fafb-data/fafb_v14/fafb_v14_orig/$dataKey", range, useCustomRangeOption = true) - .get - - assert(bytes.length == range.length) - assert(bytes.take(10).sameElements(Array(-1, -40, -1, -32, 0, 16, 74, 70, 73, 70))) - } - } - - "with Google Cloud FileSystem" should { - "return correct response" in { - val uri = new URI("gs://neuroglancer-fafb-data/fafb_v14/fafb_v14_orig") - val fileSystem = CloudStorageFileSystem.forBucket("neuroglancer-fafb-data", - CloudStorageConfiguration.DEFAULT, - StorageOptions.newBuilder().build()) - val path = fileSystem.provider().getPath(uri) - val store = new FileSystemStore(path) - val bytes = store.readByteRange(s"/fafb_v14/fafb_v14_orig/$dataKey", range).get - - assert(bytes.length == range.length) - assert(bytes.take(10).sameElements(Array(-1, -40, -1, -32, 0, 16, 74, 70, 73, 70))) - } - } - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/BucketProvider.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/BucketProvider.scala index f9877b2a40f..2bf6f564f7f 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/BucketProvider.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/BucketProvider.scala @@ -1,13 +1,13 @@ package com.scalableminds.webknossos.datastore.dataformats import com.scalableminds.util.tools.{Fox, FoxImplicits} +import com.scalableminds.webknossos.datastore.datavault.{FileSystemVaultPath, VaultPath} import com.scalableminds.webknossos.datastore.models.BucketPosition import com.scalableminds.webknossos.datastore.models.requests.DataReadInstruction import com.scalableminds.webknossos.datastore.storage.{DataCubeCache, FileSystemService} import com.typesafe.scalalogging.LazyLogging import net.liftweb.common.Empty -import java.nio.file.Path import scala.concurrent.ExecutionContext trait BucketProvider extends FoxImplicits with LazyLogging { @@ -45,12 +45,13 @@ trait BucketProvider extends FoxImplicits with LazyLogging { Iterator.empty protected def localPathFrom(readInstruction: DataReadInstruction, relativeMagPath: String)( - implicit ec: ExecutionContext): Fox[Path] = { - val magPath = readInstruction.baseDir - .resolve(readInstruction.dataSource.id.team) - .resolve(readInstruction.dataSource.id.name) - .resolve(readInstruction.dataLayer.name) - .resolve(relativeMagPath) + implicit ec: ExecutionContext): Fox[VaultPath] = { + val magPath = FileSystemVaultPath.fromPath( + readInstruction.baseDir + .resolve(readInstruction.dataSource.id.team) + .resolve(readInstruction.dataSource.id.name) + .resolve(readInstruction.dataLayer.name) + .resolve(relativeMagPath)) if (magPath.toFile.exists()) { Fox.successful(magPath) } else Fox.empty diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala index 4510ec41572..30f92c8761c 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/MagLocator.scala @@ -3,7 +3,7 @@ package com.scalableminds.webknossos.datastore.dataformats import com.scalableminds.util.geometry.Vec3Int import com.scalableminds.webknossos.datastore.datareaders.AxisOrder import 
com.scalableminds.webknossos.datastore.models.datasource.ResolutionFormatHelper -import com.scalableminds.webknossos.datastore.storage.{FileSystemsHolder, LegacyFileSystemCredential} +import com.scalableminds.webknossos.datastore.storage.{DataVaultsHolder, LegacyFileSystemCredential} import play.api.libs.json.{Json, OFormat} import java.net.URI @@ -17,7 +17,7 @@ case class MagLocator(mag: Vec3Int, lazy val pathWithFallback: String = path.getOrElse(mag.toMagLiteral(allowScalar = true)) lazy val uri: URI = new URI(pathWithFallback) - lazy val isRemote: Boolean = FileSystemsHolder.isSupportedRemoteScheme(uri.getScheme) + lazy val isRemote: Boolean = DataVaultsHolder.isSupportedRemoteScheme(uri.getScheme) } object MagLocator extends ResolutionFormatHelper { diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/n5/N5BucketProvider.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/n5/N5BucketProvider.scala index c164d6d407a..b0980ceb524 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/n5/N5BucketProvider.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/n5/N5BucketProvider.scala @@ -5,6 +5,7 @@ import com.scalableminds.util.requestlogging.RateLimitedErrorLogging import com.scalableminds.util.tools.Fox import com.scalableminds.webknossos.datastore.dataformats.{BucketProvider, DataCubeHandle, MagLocator} import com.scalableminds.webknossos.datastore.datareaders.n5.N5Array +import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.scalableminds.webknossos.datastore.models.BucketPosition import com.scalableminds.webknossos.datastore.models.requests.DataReadInstruction import com.scalableminds.webknossos.datastore.storage.FileSystemService @@ -12,7 +13,6 @@ import com.typesafe.scalalogging.LazyLogging import net.liftweb.common.{Empty, Failure, Full} import net.liftweb.util.Helpers.tryo -import java.nio.file.Path import scala.concurrent.ExecutionContext class N5CubeHandle(n5Array: N5Array) extends DataCubeHandle with LazyLogging with RateLimitedErrorLogging { @@ -45,7 +45,7 @@ class N5BucketProvider(layer: N5Layer, val fileSystemServiceOpt: Option[FileSyst fileSystemServiceOpt match { case Some(fileSystemService: FileSystemService) => for { - magPath: Path <- if (n5Mag.isRemote) { + magPath: VaultPath <- if (n5Mag.isRemote) { fileSystemService.remotePathFor(n5Mag) } else localPathFrom(readInstruction, n5Mag.pathWithFallback) cubeHandle <- tryo(onError = e => logError(e))(N5Array.open(magPath, n5Mag.axisOrder, n5Mag.channelIndex)) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/precomputed/PrecomputedBucketProvider.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/precomputed/PrecomputedBucketProvider.scala index 5e7586f8179..4ffdd15d7c9 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/precomputed/PrecomputedBucketProvider.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/precomputed/PrecomputedBucketProvider.scala @@ -5,6 +5,7 @@ import com.scalableminds.util.requestlogging.RateLimitedErrorLogging import com.scalableminds.util.tools.Fox import com.scalableminds.webknossos.datastore.dataformats.{BucketProvider, DataCubeHandle, MagLocator} import com.scalableminds.webknossos.datastore.datareaders.precomputed.PrecomputedArray +import com.scalableminds.webknossos.datastore.datavault.VaultPath 
import com.scalableminds.webknossos.datastore.models.BucketPosition import com.scalableminds.webknossos.datastore.models.requests.DataReadInstruction import com.scalableminds.webknossos.datastore.storage.FileSystemService @@ -12,7 +13,6 @@ import com.typesafe.scalalogging.LazyLogging import net.liftweb.common.{Empty, Failure, Full} import net.liftweb.util.Helpers.tryo -import java.nio.file.Path import scala.concurrent.ExecutionContext class PrecomputedCubeHandle(precomputedArray: PrecomputedArray) @@ -48,7 +48,7 @@ class PrecomputedBucketProvider(layer: PrecomputedLayer, val fileSystemServiceOp fileSystemServiceOpt match { case Some(fileSystemService: FileSystemService) => for { - magPath: Path <- if (precomputedMag.isRemote) { + magPath: VaultPath <- if (precomputedMag.isRemote) { fileSystemService.remotePathFor(precomputedMag) } else localPathFrom(readInstruction, precomputedMag.pathWithFallback) cubeHandle <- tryo(onError = e => logError(e))( diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr/ZarrBucketProvider.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr/ZarrBucketProvider.scala index ec352a48a82..e26c9e9504a 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr/ZarrBucketProvider.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/dataformats/zarr/ZarrBucketProvider.scala @@ -5,6 +5,7 @@ import com.scalableminds.util.requestlogging.RateLimitedErrorLogging import com.scalableminds.util.tools.Fox import com.scalableminds.webknossos.datastore.dataformats.{BucketProvider, DataCubeHandle, MagLocator} import com.scalableminds.webknossos.datastore.datareaders.zarr.ZarrArray +import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.scalableminds.webknossos.datastore.models.BucketPosition import com.scalableminds.webknossos.datastore.models.requests.DataReadInstruction import com.scalableminds.webknossos.datastore.storage.FileSystemService @@ -12,7 +13,6 @@ import com.typesafe.scalalogging.LazyLogging import net.liftweb.common.{Empty, Failure, Full} import net.liftweb.util.Helpers.tryo -import java.nio.file.Path import scala.concurrent.ExecutionContext class ZarrCubeHandle(zarrArray: ZarrArray) extends DataCubeHandle with LazyLogging with RateLimitedErrorLogging { @@ -45,7 +45,7 @@ class ZarrBucketProvider(layer: ZarrLayer, val fileSystemServiceOpt: Option[File fileSystemServiceOpt match { case Some(fileSystemService: FileSystemService) => for { - magPath: Path <- if (zarrMag.isRemote) { + magPath: VaultPath <- if (zarrMag.isRemote) { fileSystemService.remotePathFor(zarrMag) } else localPathFrom(readInstruction, zarrMag.pathWithFallback) cubeHandle <- tryo(onError = e => logError(e))( diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/ChunkReader.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/ChunkReader.scala index 57e78d72e88..16f6fff434a 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/ChunkReader.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/ChunkReader.scala @@ -1,5 +1,6 @@ package com.scalableminds.webknossos.datastore.datareaders +import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.typesafe.scalalogging.LazyLogging import ucar.ma2.{Array => MultiArray, DataType => MADataType} @@ -9,8 +10,8 @@ import scala.concurrent.Future import 
scala.util.Using object ChunkReader { - def create(store: FileSystemStore, header: DatasetHeader): ChunkReader = - new ChunkReader(header, store, createChunkTyper(header)) + def create(vaultPath: VaultPath, header: DatasetHeader): ChunkReader = + new ChunkReader(header, vaultPath, createChunkTyper(header)) def createChunkTyper(header: DatasetHeader): ChunkTyper = header.resolvedDataType match { @@ -23,7 +24,7 @@ object ChunkReader { } } -class ChunkReader(val header: DatasetHeader, val store: FileSystemStore, val chunkTyper: ChunkTyper) { +class ChunkReader(val header: DatasetHeader, val vaultPath: VaultPath, val chunkTyper: ChunkTyper) { lazy val chunkSize: Int = header.chunkSize.toList.product @throws[IOException] @@ -36,7 +37,7 @@ class ChunkReader(val header: DatasetHeader, val store: FileSystemStore, val chu // and chunk shape (optional, only for data formats where each chunk reports its own shape, e.g. N5) protected def readChunkBytesAndShape(path: String): Option[(Array[Byte], Option[Array[Int]])] = Using.Manager { use => - store.readBytes(path).map { bytes => + (vaultPath / path).readBytes().map { bytes => val is = use(new ByteArrayInputStream(bytes)) val os = use(new ByteArrayOutputStream()) header.compressorImpl.uncompress(is, os) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/DatasetArray.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/DatasetArray.scala index 02af4a302c3..df06c55537a 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/DatasetArray.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/DatasetArray.scala @@ -6,6 +6,7 @@ import com.scalableminds.util.geometry.Vec3Int import com.scalableminds.util.tools.Fox import com.scalableminds.util.tools.Fox.option2Fox import com.scalableminds.webknossos.datastore.datareaders.zarr.BytesConverter +import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.typesafe.scalalogging.LazyLogging import ucar.ma2.{InvalidRangeException, Array => MultiArray} @@ -15,14 +16,14 @@ import java.util import scala.concurrent.{ExecutionContext, Future} class DatasetArray(relativePath: DatasetPath, - store: FileSystemStore, + vaultPath: VaultPath, header: DatasetHeader, axisOrder: AxisOrder, channelIndex: Option[Int]) extends LazyLogging { protected val chunkReader: ChunkReader = - ChunkReader.create(store, header) + ChunkReader.create(vaultPath, header) // cache currently limited to 1 GB per array private lazy val chunkContentsCache: Cache[String, MultiArray] = { @@ -126,7 +127,7 @@ class DatasetArray(relativePath: DatasetPath, override def toString: String = s"${getClass.getCanonicalName} {'/${relativePath.storeKey}' axisOrder=$axisOrder shape=${header.datasetShape.mkString( - ",")} chunks=${header.chunkSize.mkString(",")} dtype=${header.dataType} fillValue=${header.fillValueNumber}, ${header.compressorImpl}, byteOrder=${header.byteOrder}, store=${store.getClass.getSimpleName}}" + ",")} chunks=${header.chunkSize.mkString(",")} dtype=${header.dataType} fillValue=${header.fillValueNumber}, ${header.compressorImpl}, byteOrder=${header.byteOrder}, vault=${vaultPath.getName}}" } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/FileSystemStore.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/FileSystemStore.scala deleted file mode 100644 index 1b56e8e7a11..00000000000 --- 
a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/FileSystemStore.scala +++ /dev/null @@ -1,37 +0,0 @@ -package com.scalableminds.webknossos.datastore.datareaders - -import com.scalableminds.util.io.ZipIO -import com.scalableminds.webknossos.datastore.storage.httpsfilesystem.ByteChannelOptions -import net.liftweb.common.{EmptyBox, Full} -import net.liftweb.util.Helpers.tryo - -import java.nio.ByteBuffer -import java.nio.file.{Files, OpenOption, Path} - -class FileSystemStore(val internalRoot: Path) { - def readBytes(key: String): Option[Array[Byte]] = { - val path = internalRoot.resolve(key) - tryo(Files.readAllBytes(path)).toOption.map(ZipIO.tryGunzip) - } - - def readByteRange(key: String, range: Range, useCustomRangeOption: Boolean = false): Option[Array[Byte]] = { - val path = internalRoot.resolve(key) - val optionSet = new java.util.HashSet[OpenOption]() - - if (useCustomRangeOption) { - optionSet.add(ByteChannelOptions.RANGE) - } - val channel = path.getFileSystem.provider().newByteChannel(path, optionSet) - val buf = ByteBuffer.allocateDirect(range.length) - channel.position(range.start) - val box = tryo(channel.read(buf)) - buf.position(0) - box match { - case Full(length) => - val arr = new Array[Byte](length) - buf.get(arr) - Some(arr) - case box: EmptyBox => None - } - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/n5/N5Array.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/n5/N5Array.scala index 0842a1c636c..f9a3b12625b 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/n5/N5Array.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/n5/N5Array.scala @@ -5,23 +5,21 @@ import com.scalableminds.webknossos.datastore.datareaders.{ ChunkReader, DatasetArray, DatasetHeader, - DatasetPath, - FileSystemStore + DatasetPath } +import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.typesafe.scalalogging.LazyLogging import play.api.libs.json.{JsError, JsSuccess, Json} import java.io.IOException import java.nio.charset.StandardCharsets -import java.nio.file.Path object N5Array extends LazyLogging { @throws[IOException] - def open(path: Path, axisOrderOpt: Option[AxisOrder], channelIndex: Option[Int]): N5Array = { - val store = new FileSystemStore(path) + def open(path: VaultPath, axisOrderOpt: Option[AxisOrder], channelIndex: Option[Int]): N5Array = { val rootPath = new DatasetPath("") val headerPath = rootPath.resolve(N5Header.FILENAME_ATTRIBUTES_JSON) - val headerBytes = store.readBytes(headerPath.storeKey) + val headerBytes = (path / headerPath.storeKey).readBytes() if (headerBytes.isEmpty) throw new IOException( "'" + N5Header.FILENAME_ATTRIBUTES_JSON + "' expected but is not readable or missing in store.") @@ -37,18 +35,18 @@ object N5Array extends LazyLogging { throw new IllegalArgumentException( f"Chunk size of this N5 Array exceeds limit of ${DatasetArray.chunkSizeLimitBytes}, got ${header.bytesPerChunk}") } - new N5Array(rootPath, store, header, axisOrderOpt.getOrElse(AxisOrder.asZyxFromRank(header.rank)), channelIndex) + new N5Array(rootPath, path, header, axisOrderOpt.getOrElse(AxisOrder.asZyxFromRank(header.rank)), channelIndex) } } class N5Array(relativePath: DatasetPath, - store: FileSystemStore, + vaultPath: VaultPath, header: DatasetHeader, axisOrder: AxisOrder, channelIndex: Option[Int]) - extends DatasetArray(relativePath, store, header, axisOrder, channelIndex) + extends 
DatasetArray(relativePath, vaultPath, header, axisOrder, channelIndex) with LazyLogging { override protected val chunkReader: ChunkReader = - N5ChunkReader.create(store, header) + N5ChunkReader.create(vaultPath, header) } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/n5/N5ChunkReader.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/n5/N5ChunkReader.scala index f5567163e21..3db9782aff8 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/n5/N5ChunkReader.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/n5/N5ChunkReader.scala @@ -1,14 +1,15 @@ package com.scalableminds.webknossos.datastore.datareaders.n5 -import com.scalableminds.webknossos.datastore.datareaders.{ChunkReader, DatasetHeader, FileSystemStore, ChunkTyper} +import com.scalableminds.webknossos.datastore.datareaders.{ChunkReader, ChunkTyper, DatasetHeader} +import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.typesafe.scalalogging.LazyLogging import java.io.{ByteArrayInputStream, ByteArrayOutputStream} import scala.util.Using object N5ChunkReader { - def create(store: FileSystemStore, header: DatasetHeader): ChunkReader = - new N5ChunkReader(header, store, ChunkReader.createChunkTyper(header)) + def create(vaultPath: VaultPath, header: DatasetHeader): ChunkReader = + new N5ChunkReader(header, vaultPath, ChunkReader.createChunkTyper(header)) } // N5 allows for a 'varmode' which means that the number of elements in the chunk can deviate from the set chunk size. @@ -16,8 +17,8 @@ object N5ChunkReader { // Here, we provide only provide one implementation to handle the `varmode`: // N5ChunkReader, always fills the chunk to the bytes necessary -class N5ChunkReader(header: DatasetHeader, store: FileSystemStore, typedChunkReader: ChunkTyper) - extends ChunkReader(header, store, typedChunkReader) +class N5ChunkReader(header: DatasetHeader, vaultPath: VaultPath, typedChunkReader: ChunkTyper) + extends ChunkReader(header, vaultPath, typedChunkReader) with LazyLogging { val dataExtractor: N5DataExtractor = new N5DataExtractor @@ -36,7 +37,7 @@ class N5ChunkReader(header: DatasetHeader, store: FileSystemStore, typedChunkRea } for { - bytes <- store.readBytes(path) + bytes <- (vaultPath / path).readBytes() (blockHeader, data) = dataExtractor.readBytesAndHeader(bytes) paddedChunkBytes = processBytes(data, blockHeader.blockSize.product) } yield (paddedChunkBytes, Some(blockHeader.blockSize)) diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedArray.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedArray.scala index adca5007f3e..58dbaf27d55 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedArray.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedArray.scala @@ -1,26 +1,20 @@ package com.scalableminds.webknossos.datastore.datareaders.precomputed -import com.scalableminds.webknossos.datastore.datareaders.{ - AxisOrder, - ChunkReader, - DatasetArray, - DatasetPath, - FileSystemStore -} +import com.scalableminds.webknossos.datastore.datareaders.{AxisOrder, ChunkReader, DatasetArray, DatasetPath} +import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.typesafe.scalalogging.LazyLogging import play.api.libs.json.{JsError, 
JsSuccess, Json} import java.io.IOException import java.nio.charset.StandardCharsets -import java.nio.file.Path object PrecomputedArray extends LazyLogging { @throws[IOException] - def open(magPath: Path, axisOrderOpt: Option[AxisOrder], channelIndex: Option[Int]): PrecomputedArray = { + def open(magPath: VaultPath, axisOrderOpt: Option[AxisOrder], channelIndex: Option[Int]): PrecomputedArray = { - val store = new FileSystemStore(magPath.getParent) + val basePath = magPath.getParent.asInstanceOf[VaultPath] val headerPath = s"${PrecomputedHeader.FILENAME_INFO}" - val headerBytes = store.readBytes(headerPath) + val headerBytes = (basePath / headerPath).readBytes() if (headerBytes.isEmpty) throw new IOException( "'" + PrecomputedHeader.FILENAME_INFO + "' expected but is not readable or missing in store.") @@ -46,7 +40,7 @@ object PrecomputedArray extends LazyLogging { } val datasetPath = new DatasetPath(key.toString) new PrecomputedArray(datasetPath, - store, + basePath, scaleHeader, axisOrderOpt.getOrElse(AxisOrder.asZyxFromRank(scaleHeader.rank)), channelIndex) @@ -54,15 +48,15 @@ object PrecomputedArray extends LazyLogging { } class PrecomputedArray(relativePath: DatasetPath, - store: FileSystemStore, + vaultPath: VaultPath, header: PrecomputedScaleHeader, axisOrder: AxisOrder, channelIndex: Option[Int]) - extends DatasetArray(relativePath, store, header, axisOrder, channelIndex) + extends DatasetArray(relativePath, vaultPath, header, axisOrder, channelIndex) with LazyLogging { override protected val chunkReader: ChunkReader = - PrecomputedChunkReader.create(store, header) + PrecomputedChunkReader.create(vaultPath, header) lazy val voxelOffset: Array[Int] = header.precomputedScale.voxel_offset.getOrElse(Array(0, 0, 0)) override protected def getChunkFilename(chunkIndex: Array[Int]): String = { diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedChunkReader.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedChunkReader.scala index 7019d54ccf0..45a3acbeb10 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedChunkReader.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/precomputed/PrecomputedChunkReader.scala @@ -1,11 +1,12 @@ package com.scalableminds.webknossos.datastore.datareaders.precomputed -import com.scalableminds.webknossos.datastore.datareaders.{ChunkReader, DatasetHeader, FileSystemStore, ChunkTyper} +import com.scalableminds.webknossos.datastore.datareaders.{ChunkReader, ChunkTyper, DatasetHeader} +import com.scalableminds.webknossos.datastore.datavault.VaultPath object PrecomputedChunkReader { - def create(store: FileSystemStore, header: DatasetHeader): ChunkReader = - new PrecomputedChunkReader(header, store, ChunkReader.createChunkTyper(header)) + def create(vaultPath: VaultPath, header: DatasetHeader): ChunkReader = + new PrecomputedChunkReader(header, vaultPath, ChunkReader.createChunkTyper(header)) } -class PrecomputedChunkReader(header: DatasetHeader, store: FileSystemStore, typedChunkReader: ChunkTyper) - extends ChunkReader(header, store, typedChunkReader) {} +class PrecomputedChunkReader(header: DatasetHeader, vaultPath: VaultPath, typedChunkReader: ChunkTyper) + extends ChunkReader(header, vaultPath, typedChunkReader) {} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/zarr/ZarrArray.scala 
b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/zarr/ZarrArray.scala index fcdccdee99c..fe6f6a4accc 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/zarr/ZarrArray.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datareaders/zarr/ZarrArray.scala @@ -2,25 +2,23 @@ package com.scalableminds.webknossos.datastore.datareaders.zarr import java.io.IOException import java.nio.charset.StandardCharsets -import java.nio.file.Path import com.scalableminds.webknossos.datastore.datareaders.{ AxisOrder, ChunkReader, DatasetArray, DatasetHeader, - FileSystemStore, DatasetPath } +import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.typesafe.scalalogging.LazyLogging import play.api.libs.json.{JsError, JsSuccess, Json} object ZarrArray extends LazyLogging { @throws[IOException] - def open(path: Path, axisOrderOpt: Option[AxisOrder], channelIndex: Option[Int]): ZarrArray = { - val store = new FileSystemStore(path) + def open(path: VaultPath, axisOrderOpt: Option[AxisOrder], channelIndex: Option[Int]): ZarrArray = { val rootPath = new DatasetPath("") val headerPath = rootPath.resolve(ZarrHeader.FILENAME_DOT_ZARRAY) - val headerBytes = store.readBytes(headerPath.storeKey) + val headerBytes = (path / headerPath.storeKey).readBytes() if (headerBytes.isEmpty) throw new IOException( "'" + ZarrHeader.FILENAME_DOT_ZARRAY + "' expected but is not readable or missing in store.") @@ -36,19 +34,19 @@ object ZarrArray extends LazyLogging { throw new IllegalArgumentException( f"Chunk size of this Zarr Array exceeds limit of ${DatasetArray.chunkSizeLimitBytes}, got ${header.bytesPerChunk}") } - new ZarrArray(rootPath, store, header, axisOrderOpt.getOrElse(AxisOrder.asZyxFromRank(header.rank)), channelIndex) + new ZarrArray(rootPath, path, header, axisOrderOpt.getOrElse(AxisOrder.asZyxFromRank(header.rank)), channelIndex) } } class ZarrArray(relativePath: DatasetPath, - store: FileSystemStore, + vaultPath: VaultPath, header: DatasetHeader, axisOrder: AxisOrder, channelIndex: Option[Int]) - extends DatasetArray(relativePath, store, header, axisOrder, channelIndex) + extends DatasetArray(relativePath, vaultPath, header, axisOrder, channelIndex) with LazyLogging { override protected val chunkReader: ChunkReader = - ChunkReader.create(store, header) + ChunkReader.create(vaultPath, header) } diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/DataVault.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/DataVault.scala new file mode 100644 index 00000000000..ec99b1e5308 --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/DataVault.scala @@ -0,0 +1,7 @@ +package com.scalableminds.webknossos.datastore.datavault + +import scala.collection.immutable.NumericRange + +trait DataVault { + def readBytes(path: VaultPath, range: Option[NumericRange[Long]] = None): Array[Byte] +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemDataVault.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemDataVault.scala new file mode 100644 index 00000000000..5d4c77af3df --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemDataVault.scala @@ -0,0 +1,10 @@ +package com.scalableminds.webknossos.datastore.datavault + +import scala.collection.immutable.NumericRange + +class FileSystemDataVault extends 
DataVault { + override def readBytes(path: VaultPath, range: Option[NumericRange[Long]]): Array[Byte] = ??? +} +object FileSystemDataVault { + def create: FileSystemDataVault = new FileSystemDataVault +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemVaultPath.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemVaultPath.scala new file mode 100644 index 00000000000..81c7333d3b9 --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/FileSystemVaultPath.scala @@ -0,0 +1,69 @@ +package com.scalableminds.webknossos.datastore.datavault + +import java.net.URI +import java.nio.ByteBuffer +import java.nio.file.{FileSystem, Files, LinkOption, Path, WatchEvent, WatchKey, WatchService} +import scala.collection.immutable.NumericRange + +class FileSystemVaultPath(basePath: Path) extends VaultPath(uri = new URI(""), dataVault = FileSystemDataVault.create) { + + override def readBytesGet(range: Option[NumericRange[Long]]): Array[Byte] = + range match { + case Some(r) => + val channel = Files.newByteChannel(basePath) + val buf = ByteBuffer.allocateDirect(r.length) + channel.position(r.start) + channel.read(buf) + buf.position(0) + val arr = new Array[Byte](r.length) + buf.get(arr) + arr + case None => Files.readAllBytes(basePath) + } + + override def getFileSystem: FileSystem = basePath.getFileSystem + + override def isAbsolute: Boolean = basePath.isAbsolute + + override def getRoot: Path = new FileSystemVaultPath(basePath.getRoot) + + override def getFileName: Path = new FileSystemVaultPath(basePath.getFileName) + + override def getParent: Path = new FileSystemVaultPath(basePath.getParent) + + override def getNameCount: Int = basePath.getNameCount + + override def getName(index: Int): Path = new FileSystemVaultPath(basePath.getName(index)) + + override def subpath(beginIndex: Int, endIndex: Int): Path = basePath.subpath(beginIndex: Int, endIndex: Int) + + override def startsWith(other: Path): Boolean = basePath.startsWith(other) + + override def endsWith(other: Path): Boolean = basePath.endsWith(other) + + override def normalize(): Path = new FileSystemVaultPath(basePath.normalize()) + + override def resolve(other: Path): Path = new FileSystemVaultPath(basePath.resolve(other)) + + override def /(key: String): VaultPath = new FileSystemVaultPath(basePath.resolve(key)) + + override def relativize(other: Path): Path = new FileSystemVaultPath(basePath.relativize(other)) + + override def toUri: URI = basePath.toUri + + override def toAbsolutePath: Path = new FileSystemVaultPath(basePath.toAbsolutePath) + + override def toRealPath(options: LinkOption*): Path = ??? + + override def register(watcher: WatchService, + events: Array[WatchEvent.Kind[_]], + modifiers: WatchEvent.Modifier*): WatchKey = ??? 
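// Usage sketch for the wrapper above, assuming a hypothetical local mag directory: the wrapped
// java.nio path goes through the same VaultPath API that remote vaults use, exactly as
// BucketProvider.localPathFrom now does elsewhere in this patch. The concrete path is made up
// for illustration; ZarrHeader.FILENAME_DOT_ZARRAY is the header-filename constant that
// ZarrArray in this patch already reads.
//
//   import java.nio.file.Paths
//   import com.scalableminds.webknossos.datastore.datareaders.zarr.ZarrHeader
//
//   val magPath: VaultPath =
//     FileSystemVaultPath.fromPath(Paths.get("/data/binaryData/sample_team/sample_dataset/color/1"))
//   val headerBytes: Option[Array[Byte]] = (magPath / ZarrHeader.FILENAME_DOT_ZARRAY).readBytes()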
+ + override def compareTo(other: Path): Int = basePath.compareTo(other) + + override def toString: String = basePath.toString +} + +object FileSystemVaultPath { + def fromPath(path: Path): FileSystemVaultPath = new FileSystemVaultPath(path) +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/GoogleCloudDataVault.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/GoogleCloudDataVault.scala new file mode 100644 index 00000000000..29dd585aa48 --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/GoogleCloudDataVault.scala @@ -0,0 +1,54 @@ +package com.scalableminds.webknossos.datastore.datavault + +import com.google.auth.oauth2.ServiceAccountCredentials +import com.google.cloud.storage.{BlobId, Storage, StorageOptions} +import com.scalableminds.webknossos.datastore.storage.{GoogleServiceAccountCredential, RemoteSourceDescriptor} + +import java.io.ByteArrayInputStream +import java.net.URI +import java.nio.ByteBuffer +import scala.collection.immutable.NumericRange + +class GoogleCloudDataVault(uri: URI, credential: Option[GoogleServiceAccountCredential]) extends DataVault { + + lazy val storageOptions: StorageOptions = credential match { + case Some(credential: GoogleServiceAccountCredential) => + StorageOptions + .newBuilder() + .setCredentials( + ServiceAccountCredentials.fromStream(new ByteArrayInputStream(credential.secretJson.toString.getBytes))) + .build() + case _ => StorageOptions.newBuilder().build() + } + + private lazy val storage: Storage = storageOptions.getService + + private lazy val bucket: String = uri.getHost + + override def readBytes(path: VaultPath, range: Option[NumericRange[Long]]): Array[Byte] = { + val objName = path.toUri.getPath.tail + val blobId = BlobId.of(bucket, objName) + range match { + case Some(r) => { + val blobReader = storage.reader(blobId) + blobReader.seek(r.start) + blobReader.limit(r.end) + val bb = ByteBuffer.allocateDirect(r.length) + blobReader.read(bb) + val arr = new Array[Byte](r.length) + bb.position(0) + bb.get(arr) + arr + } + case None => storage.readAllBytes(bucket, objName) + } + + } +} + +object GoogleCloudDataVault { + def create(remoteSourceDescriptor: RemoteSourceDescriptor): GoogleCloudDataVault = { + val credential = remoteSourceDescriptor.credential.map(f => f.asInstanceOf[GoogleServiceAccountCredential]) + new GoogleCloudDataVault(remoteSourceDescriptor.uri, credential) + } +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/HttpsDataVault.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/HttpsDataVault.scala new file mode 100644 index 00000000000..f88e508fb77 --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/HttpsDataVault.scala @@ -0,0 +1,101 @@ +package com.scalableminds.webknossos.datastore.datavault + +import com.scalableminds.webknossos.datastore.storage.{ + FileSystemCredential, + HttpBasicAuthCredential, + LegacyFileSystemCredential, + RemoteSourceDescriptor +} + +import java.net.URI +import scala.concurrent.duration.DurationInt +import sttp.client3._ +import sttp.model.Uri + +import scala.collection.immutable.NumericRange + +class HttpsDataVault(credential: Option[FileSystemCredential]) extends DataVault { + + private val connectionTimeout = 1 minute + private val readTimeout = 10 minutes + private lazy val backend = HttpClientSyncBackend(options = SttpBackendOptions.connectionTimeout(connectionTimeout)) 
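// Read sketch mirroring DataVaultTestSuite above: an anonymous HTTPS vault fetching the first
// 1024 bytes of a publicly hosted object via an HTTP Range request. The URI and object key are
// the ones used in the test suite; everything else is illustrative only.
//
//   import java.net.URI
//   import com.scalableminds.webknossos.datastore.storage.RemoteSourceDescriptor
//
//   val uri = new URI("http://storage.googleapis.com/")
//   val vaultPath = new VaultPath(uri, HttpsDataVault.create(RemoteSourceDescriptor(uri, None)))
//   val firstKb: Option[Array[Byte]] =
//     (vaultPath / "neuroglancer-fafb-data/fafb_v14/fafb_v14_orig/32_32_40/15360-15424_8384-8448_3520-3584")
//       .readBytes(Some(Range.Long(0, 1024, 1)))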
+ + def getBasicAuthCredential: Option[HttpBasicAuthCredential] = + credential.flatMap { c => + c match { + case h: HttpBasicAuthCredential => Some(h) + case l: LegacyFileSystemCredential => Some(l.toBasicAuth) + case _ => None + } + } + + private def authenticatedRequest() = + getBasicAuthCredential.map { credential => + basicRequest.auth.basic(credential.username, credential.password) + }.getOrElse( + basicRequest + ) + .readTimeout(readTimeout) + + private def headRequest(uri: URI): Request[Either[String, String], Any] = + authenticatedRequest().head(Uri(uri)) + + private val headerInfoCache = scala.collection.mutable.Map[URI, (Boolean, Long)]() + + private def getHeaderInformation(uri: URI) = + headerInfoCache.get(uri) match { + case Some((b, i)) => (b, i) + case None => { + val response: Identity[Response[Either[String, String]]] = backend.send(headRequest(uri)) + val acceptsPartialRequests = response.headers.find(_.is("Accept-Ranges")).exists(_.value == "bytes") + val dataSize = response.headers.find(_.is("Content-Length")).map(_.value.toLong).getOrElse(0L) + headerInfoCache(uri) = (acceptsPartialRequests, dataSize) + (acceptsPartialRequests, dataSize) + } + } + + private def getDataRequest(uri: URI): Request[Either[String, Array[Byte]], Any] = + authenticatedRequest().get(Uri(uri)).response(asByteArray) + + private def getRangeRequest(uri: URI, range: NumericRange[Long]): Request[Either[String, Array[Byte]], Any] = + getDataRequest(uri).header("Range", s"bytes=${range.start}-${range.end - 1}").response(asByteArray) + + private def getResponse(uri: URI): Identity[Response[Either[String, Array[Byte]]]] = { + val request: Request[Either[String, Array[Byte]], Any] = getDataRequest(uri) + backend.send(request) + } + + private def getResponseForRangeRequest(uri: URI, range: NumericRange[Long]) = { + val request = getRangeRequest(uri, range) + backend.send(request) + } + + private def fullResponse(uri: URI): Identity[Response[Either[String, Array[Byte]]]] = getResponse(uri) + + override def readBytes(path: VaultPath, range: Option[NumericRange[Long]]): Array[Byte] = { + val uri = path.toUri + val response = range match { + case Some(r) => { + val headerInfos = getHeaderInformation(uri) + if (!headerInfos._1) { + throw new UnsupportedOperationException(s"Range requests not supported for ${uri.toString}") + } + getResponseForRangeRequest(uri, r) + } + case None => fullResponse(uri) + } + + if (!response.isSuccess) { + throw new Exception(s"Https read failed for uri $uri: Response was not successful ${response.statusText}") + } + response.body match { + case Left(e) => throw new Exception(s"Https read failed for uri $uri: $e: Response empty") + case Right(bytes) => bytes + } + } +} + +object HttpsDataVault { + def create(remoteSourceDescriptor: RemoteSourceDescriptor): HttpsDataVault = + new HttpsDataVault(remoteSourceDescriptor.credential) +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/S3DataVault.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/S3DataVault.scala new file mode 100644 index 00000000000..0c1e9304beb --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/S3DataVault.scala @@ -0,0 +1,113 @@ +package com.scalableminds.webknossos.datastore.datavault + +import com.amazonaws.auth.{ + AWSCredentialsProvider, + AWSStaticCredentialsProvider, + AnonymousAWSCredentials, + BasicAWSCredentials +} +import com.amazonaws.regions.Regions +import 
com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder} +import com.amazonaws.services.s3.model.GetObjectRequest +import com.scalableminds.webknossos.datastore.storage.{RemoteSourceDescriptor, S3AccessKeyCredential} +import org.apache.commons.io.IOUtils + +import java.io.InputStream +import java.net.URI +import scala.collection.immutable.NumericRange + +class S3DataVault(s3AccessKeyCredential: Option[S3AccessKeyCredential], uri: URI) extends DataVault { + private lazy val bucketName = S3DataVault.hostBucketFromUri(uri) match { + case Some(value) => value + case None => throw new Exception(s"Could not parse S3 bucket for ${uri.toString}") + } + + val client: AmazonS3 = + S3DataVault.getAmazonS3Client(s3AccessKeyCredential) + + private def getRangeRequest(bucketName: String, key: String, range: NumericRange[Long]): GetObjectRequest = + new GetObjectRequest(bucketName, key).withRange(range.start, range.end) + + private def getRequest(bucketName: String, key: String): GetObjectRequest = new GetObjectRequest(bucketName, key) + + override def readBytes(path: VaultPath, range: Option[NumericRange[Long]]): Array[Byte] = { + val objectKey = S3DataVault.getObjectKeyFromUri(path.toUri) match { + case Some(value) => value + case None => throw new Exception(s"Could not get key for S3 from uri: ${uri.toString}") + } + val getObjectRequest = range match { + case Some(r) => getRangeRequest(bucketName, objectKey, r) + case None => getRequest(bucketName, objectKey) + } + + val is: InputStream = + client.getObject(getObjectRequest).getObjectContent + IOUtils.toByteArray(is) + } +} + +object S3DataVault { + def create(remoteSourceDescriptor: RemoteSourceDescriptor): S3DataVault = { + val credential = remoteSourceDescriptor.credential.map(f => f.asInstanceOf[S3AccessKeyCredential]) + new S3DataVault(credential, remoteSourceDescriptor.uri) + } + + private def hostBucketFromUri(uri: URI): Option[String] = { + val host = uri.getHost + if (isShortStyle(uri)) { // assume host is omitted from uri, shortcut form s3://bucket/key + Some(host) + } else if (isVirtualHostedStyle(uri)) { + Some(host.substring(0, host.length - ".s3.amazonaws.com".length)) + } else if (isPathStyle(uri)) { + Some(uri.getPath.substring(1).split("/")(0)) + } else { + None + } + } + + // https://bucket-name.s3.region-code.amazonaws.com/key-name + private def isVirtualHostedStyle(uri: URI): Boolean = + uri.getHost.endsWith(".s3.amazonaws.com") + + // https://s3.region-code.amazonaws.com/bucket-name/key-name + private def isPathStyle(uri: URI): Boolean = + uri.getHost.matches("s3(.[\\w\\-_]+)?.amazonaws.com") + + // S3://bucket-name/key-name + private def isShortStyle(uri: URI): Boolean = + !uri.getHost.contains(".") + + private def getObjectKeyFromUri(uri: URI): Option[String] = + if (isVirtualHostedStyle(uri)) { + Some(uri.getPath) + } else if (isPathStyle(uri)) { + Some(uri.getPath.substring(1).split("/").tail.mkString("/")) + } else if (isShortStyle(uri)) { + Some(uri.getPath.tail) + } else { + None + } + + private def getCredentialsProvider(credentialOpt: Option[S3AccessKeyCredential]): AWSCredentialsProvider = + credentialOpt match { + case Some(s3AccessKeyCredential: S3AccessKeyCredential) => + new AWSStaticCredentialsProvider( + new BasicAWSCredentials(s3AccessKeyCredential.accessKeyId, s3AccessKeyCredential.secretAccessKey)) + case None => + new AnonymousAWSCredentialsProvider + + } + + private def getAmazonS3Client(credentialOpt: Option[S3AccessKeyCredential]): AmazonS3 = + AmazonS3ClientBuilder.standard + 
.withCredentials(getCredentialsProvider(credentialOpt)) + .withRegion(Regions.DEFAULT_REGION) + .withForceGlobalBucketAccessEnabled(true) + .build +} + +class AnonymousAWSCredentialsProvider extends AWSCredentialsProvider { + override def getCredentials = new AnonymousAWSCredentials + + override def refresh(): Unit = {} +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/VaultPath.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/VaultPath.scala new file mode 100644 index 00000000000..00f71ebd766 --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/datavault/VaultPath.scala @@ -0,0 +1,81 @@ +package com.scalableminds.webknossos.datastore.datavault + +import com.scalableminds.util.io.ZipIO +import net.liftweb.util.Helpers.tryo + +import java.net.URI +import java.nio.file.{FileSystem, LinkOption, Path, Paths, WatchEvent, WatchKey, WatchService} +import scala.collection.immutable.NumericRange + +/* +VaultPath implements Path so that a drop in replacement is possible while continuing to use Paths for local storage. +This class does not implement all relevant methods and it might be a good idea to remove the inheritance on Path in the +future. + */ + +class VaultPath(uri: URI, dataVault: DataVault) extends Path { + + protected def readBytesGet(range: Option[NumericRange[Long]]): Array[Byte] = + dataVault.readBytes(this, range) + + def readBytes(range: Option[NumericRange[Long]] = None): Option[Array[Byte]] = + tryo(readBytesGet(range)).toOption.map(ZipIO.tryGunzip) + + override def getFileSystem: FileSystem = ??? + + override def isAbsolute: Boolean = ??? + + override def getRoot: Path = ??? + + override def getFileName: Path = + Paths.get(uri.toString.split("/").last) + + override def getParent: Path = { + val newUri = + if (uri.getPath.endsWith("/")) uri.resolve("..") + else uri.resolve(".") + new VaultPath(newUri, dataVault) + } + + override def getNameCount: Int = ??? + + override def getName(index: Int): Path = ??? + + override def subpath(beginIndex: Int, endIndex: Int): Path = ??? + + override def startsWith(other: Path): Boolean = ??? + + override def endsWith(other: Path): Boolean = ??? + + override def normalize(): Path = ??? + + override def resolve(other: String): Path = this / other + + override def resolve(other: Path): Path = this / other.toString + + def /(key: String): VaultPath = + if (uri.toString.endsWith("/")) { + new VaultPath(uri.resolve(key), dataVault) + } else { + new VaultPath(new URI(s"${uri.toString}/").resolve(key), dataVault) + } + + override def relativize(other: Path): Path = ??? + + override def toUri: URI = + uri + + override def toAbsolutePath: Path = ??? + + override def compareTo(other: Path): Int = ??? + + override def toRealPath(options: LinkOption*): Path = ??? + + override def register(watcher: WatchService, + events: Array[WatchEvent.Kind[_]], + modifiers: WatchEvent.Modifier*): WatchKey = ??? 
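+  // Usage sketch, assuming a hypothetical dataset layout "color/1/.zarray" and a value
+  // `datasetPath: VaultPath` for its root: child paths are composed with the '/' operator
+  // and read through the underlying DataVault, e.g.
+  //   val zarrayPath: VaultPath = datasetPath / "color" / "1" / ".zarray"
+  //   val allBytes: Option[Array[Byte]] = zarrayPath.readBytes()
+  //   val firstKib: Option[Array[Byte]] = zarrayPath.readBytes(Some(Range.Long(0L, 1024L, 1L)))
+  // readBytes yields None if the read fails and decompresses gzipped payloads via ZipIO.tryGunzip.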
+ + override def toString: String = uri.toString + + def getName: String = s"VaultPath: ${this.toString} for ${dataVault.getClass.getSimpleName}" +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/AmazonS3Factory.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/AmazonS3Factory.java deleted file mode 100644 index 5ae4a414f16..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/AmazonS3Factory.java +++ /dev/null @@ -1,125 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import com.amazonaws.ClientConfiguration; -import com.amazonaws.Protocol; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.regions.Regions; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.S3ClientOptions; -import com.scalableminds.webknossos.datastore.s3fs.util.AnonymousAWSCredentialsProvider; - -import java.net.URI; -import java.util.Properties; - - -/** - * Factory base class to create a new AmazonS3 instance. - */ -public class AmazonS3Factory { - - public static final String ACCESS_KEY = "s3fs_access_key"; - public static final String SECRET_KEY = "s3fs_secret_key"; - public static final String REQUEST_METRIC_COLLECTOR_CLASS = "s3fs_request_metric_collector_class"; - public static final String CONNECTION_TIMEOUT = "s3fs_connection_timeout"; - public static final String MAX_CONNECTIONS = "s3fs_max_connections"; - public static final String MAX_ERROR_RETRY = "s3fs_max_retry_error"; - public static final String PROTOCOL = "s3fs_protocol"; - public static final String PROXY_DOMAIN = "s3fs_proxy_domain"; - public static final String PROXY_HOST = "s3fs_proxy_host"; - public static final String PROXY_PASSWORD = "s3fs_proxy_password"; - public static final String PROXY_PORT = "s3fs_proxy_port"; - public static final String PROXY_USERNAME = "s3fs_proxy_username"; - public static final String PROXY_WORKSTATION = "s3fs_proxy_workstation"; - public static final String SOCKET_SEND_BUFFER_SIZE_HINT = "s3fs_socket_send_buffer_size_hint"; - public static final String SOCKET_RECEIVE_BUFFER_SIZE_HINT = "s3fs_socket_receive_buffer_size_hint"; - public static final String SOCKET_TIMEOUT = "s3fs_socket_timeout"; - public static final String USER_AGENT = "s3fs_user_agent"; - public static final String SIGNER_OVERRIDE = "s3fs_signer_override"; - public static final String PATH_STYLE_ACCESS = "s3fs_path_style_access"; - - /** - * Build a new Amazon S3 instance with the URI and the properties provided - * @param uri URI mandatory - * @param props Properties with the credentials and others options - * @return AmazonS3 - */ - public AmazonS3 getAmazonS3Client(URI uri, Properties props) { - return AmazonS3ClientBuilder - .standard() - .withCredentials(getCredentialsProvider(props)) - .withClientConfiguration(getClientConfiguration(props)) - .withRegion(Regions.DEFAULT_REGION) - .withForceGlobalBucketAccessEnabled(true) - .build(); - } - - protected AwsClientBuilder.EndpointConfiguration getEndpointConfiguration(URI uri) { - String endpoint = uri.getHost(); - if (uri.getPort() != -1) - endpoint = uri.getHost() + ':' + uri.getPort(); - return new AwsClientBuilder.EndpointConfiguration(endpoint, 
Regions.DEFAULT_REGION.toString()); - } - - protected AWSCredentialsProvider getCredentialsProvider(Properties props) { - if (props.getProperty(ACCESS_KEY) == null && props.getProperty(SECRET_KEY) == null) { - return new AnonymousAWSCredentialsProvider(); - } - return new AWSStaticCredentialsProvider(getAWSCredentials(props)); - } - - protected S3ClientOptions getClientOptions(Properties props) { - S3ClientOptions.Builder builder = S3ClientOptions.builder(); - if (props.getProperty(PATH_STYLE_ACCESS) != null && - Boolean.parseBoolean(props.getProperty(PATH_STYLE_ACCESS))) - builder.setPathStyleAccess(true); - - return builder.build(); - } - - protected ClientConfiguration getClientConfiguration(Properties props) { - ClientConfiguration clientConfiguration = new ClientConfiguration(); - if (props.getProperty(CONNECTION_TIMEOUT) != null) - clientConfiguration.setConnectionTimeout(Integer.parseInt(props.getProperty(CONNECTION_TIMEOUT))); - if (props.getProperty(MAX_CONNECTIONS) != null) - clientConfiguration.setMaxConnections(Integer.parseInt(props.getProperty(MAX_CONNECTIONS))); - if (props.getProperty(MAX_ERROR_RETRY) != null) - clientConfiguration.setMaxErrorRetry(Integer.parseInt(props.getProperty(MAX_ERROR_RETRY))); - if (props.getProperty(PROTOCOL) != null) - clientConfiguration.setProtocol(Protocol.valueOf(props.getProperty(PROTOCOL))); - if (props.getProperty(PROXY_DOMAIN) != null) - clientConfiguration.setProxyDomain(props.getProperty(PROXY_DOMAIN)); - if (props.getProperty(PROXY_HOST) != null) - clientConfiguration.setProxyHost(props.getProperty(PROXY_HOST)); - if (props.getProperty(PROXY_PASSWORD) != null) - clientConfiguration.setProxyPassword(props.getProperty(PROXY_PASSWORD)); - if (props.getProperty(PROXY_PORT) != null) - clientConfiguration.setProxyPort(Integer.parseInt(props.getProperty(PROXY_PORT))); - if (props.getProperty(PROXY_USERNAME) != null) - clientConfiguration.setProxyUsername(props.getProperty(PROXY_USERNAME)); - if (props.getProperty(PROXY_WORKSTATION) != null) - clientConfiguration.setProxyWorkstation(props.getProperty(PROXY_WORKSTATION)); - int socketSendBufferSizeHint = 0; - if (props.getProperty(SOCKET_SEND_BUFFER_SIZE_HINT) != null) - socketSendBufferSizeHint = Integer.parseInt(props.getProperty(SOCKET_SEND_BUFFER_SIZE_HINT)); - int socketReceiveBufferSizeHint = 0; - if (props.getProperty(SOCKET_RECEIVE_BUFFER_SIZE_HINT) != null) - socketReceiveBufferSizeHint = Integer.parseInt(props.getProperty(SOCKET_RECEIVE_BUFFER_SIZE_HINT)); - clientConfiguration.setSocketBufferSizeHints(socketSendBufferSizeHint, socketReceiveBufferSizeHint); - if (props.getProperty(SOCKET_TIMEOUT) != null) - clientConfiguration.setSocketTimeout(Integer.parseInt(props.getProperty(SOCKET_TIMEOUT))); - if (props.getProperty(USER_AGENT) != null) - clientConfiguration.setUserAgentPrefix(props.getProperty(USER_AGENT)); - if (props.getProperty(SIGNER_OVERRIDE) != null) - clientConfiguration.setSignerOverride(props.getProperty(SIGNER_OVERRIDE)); - return clientConfiguration; - } - - protected BasicAWSCredentials getAWSCredentials(Properties props) { - return new BasicAWSCredentials(props.getProperty(ACCESS_KEY), props.getProperty(SECRET_KEY)); - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/LICENSE b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/LICENSE deleted file mode 100644 index 66de3f67b72..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -The s3fs 
package is based on - -https://github.com/lasersonlab/Amazon-S3-FileSystem-NIO2 - -version 5117da7c5a75a455951d7a1788c1a4b7a0719692 -Aug 25, 2022 - -Published under: - -MIT License (MIT) - -Copyright (c) 2014 Javier Arnáiz @arnaix -Copyright (c) 2014 Better.be Application Services BV - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3AccessControlList.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3AccessControlList.java deleted file mode 100644 index 4957aaa933d..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3AccessControlList.java +++ /dev/null @@ -1,64 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import static java.lang.String.format; - -import java.nio.file.AccessDeniedException; -import java.nio.file.AccessMode; -import java.util.EnumSet; - -import com.amazonaws.services.s3.model.AccessControlList; -import com.amazonaws.services.s3.model.Grant; -import com.amazonaws.services.s3.model.Owner; -import com.amazonaws.services.s3.model.Permission; - -public class S3AccessControlList { - private String fileStoreName; - private String key; - private AccessControlList acl; - private Owner owner; - - public S3AccessControlList(String fileStoreName, String key, AccessControlList acl, Owner owner) { - this.fileStoreName = fileStoreName; - this.acl = acl; - this.key = key; - this.owner = owner; - } - - public String getKey() { - return key; - } - - /** - * have almost one of the permission set in the parameter permissions - * - * @param permissions almost one - * @return - */ - private boolean hasPermission(EnumSet permissions) { - for (Grant grant : acl.getGrantsAsList()) - if (grant.getGrantee().getIdentifier().equals(owner.getId()) && permissions.contains(grant.getPermission())) - return true; - return false; - } - - public void checkAccess(AccessMode[] modes) throws AccessDeniedException { - for (AccessMode accessMode : modes) { - switch (accessMode) { - case EXECUTE: - throw new AccessDeniedException(fileName(), null, "file is not executable"); - case READ: - if (!hasPermission(EnumSet.of(Permission.FullControl, Permission.Read))) - throw new AccessDeniedException(fileName(), null, "file is not readable"); - break; - case WRITE: - if (!hasPermission(EnumSet.of(Permission.FullControl, Permission.Write))) - throw new AccessDeniedException(fileName(), null, format("bucket '%s' is not writable", fileStoreName)); - break; - } - } - } - - private 
String fileName() { - return fileStoreName + S3Path.PATH_SEPARATOR + key; - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileChannel.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileChannel.java deleted file mode 100644 index 1cd2ea5e8ee..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileChannel.java +++ /dev/null @@ -1,168 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.S3Object; -import org.apache.tika.Tika; - -import java.io.*; -import java.nio.ByteBuffer; -import java.nio.MappedByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.nio.channels.ReadableByteChannel; -import java.nio.channels.WritableByteChannel; -import java.nio.file.*; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; - -import static java.lang.String.format; - -public class S3FileChannel extends FileChannel { - - private S3Path path; - private Set options; - private FileChannel filechannel; - private Path tempFile; - - public S3FileChannel(S3Path path, Set options) throws IOException { - this.path = path; - this.options = Collections.unmodifiableSet(new HashSet<>(options)); - String key = path.getKey(); - boolean exists = path.getFileSystem().provider().exists(path); - - if (exists && this.options.contains(StandardOpenOption.CREATE_NEW)) - throw new FileAlreadyExistsException(format("target already exists: %s", path)); - else if (!exists && !this.options.contains(StandardOpenOption.CREATE_NEW) && - !this.options.contains(StandardOpenOption.CREATE)) - throw new NoSuchFileException(format("target not exists: %s", path)); - - tempFile = Files.createTempFile("temp-s3-", key.replaceAll("/", "_")); - boolean removeTempFile = true; - try { - if (exists) { - try (S3Object object = path.getFileSystem() - .getClient() - .getObject(path.getFileStore().getBucket().getName(), key)) { - Files.copy(object.getObjectContent(), tempFile, StandardCopyOption.REPLACE_EXISTING); - } - } - - Set fileChannelOptions = new HashSet<>(this.options); - fileChannelOptions.remove(StandardOpenOption.CREATE_NEW); - filechannel = FileChannel.open(tempFile, fileChannelOptions); - removeTempFile = false; - } finally { - if (removeTempFile) { - Files.deleteIfExists(tempFile); - } - } - } - - @Override - public int read(ByteBuffer dst) throws IOException { - return filechannel.read(dst); - } - - @Override - public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { - return filechannel.read(dsts, offset, length); - } - - @Override - public int write(ByteBuffer src) throws IOException { - return filechannel.write(src); - } - - @Override - public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { - return filechannel.write(srcs, offset, length); - } - - @Override - public long position() throws IOException { - return filechannel.position(); - } - - @Override - public FileChannel position(long newPosition) throws IOException { - return filechannel.position(newPosition); - } - - @Override - public long size() throws IOException { - return filechannel.size(); - } - - @Override - public FileChannel truncate(long size) throws IOException { - return filechannel.truncate(size); - } - - @Override - public void force(boolean metaData) throws IOException { - filechannel.force(metaData); - } - - @Override - public long 
transferTo(long position, long count, WritableByteChannel target) throws IOException { - return filechannel.transferTo(position, count, target); - } - - @Override - public long transferFrom(ReadableByteChannel src, long position, long count) throws IOException { - return filechannel.transferFrom(src, position, count); - } - - @Override - public int read(ByteBuffer dst, long position) throws IOException { - return filechannel.read(dst, position); - } - - @Override - public int write(ByteBuffer src, long position) throws IOException { - return filechannel.write(src, position); - } - - @Override - public MappedByteBuffer map(MapMode mode, long position, long size) throws IOException { - return filechannel.map(mode, position, size); - } - - @Override - public FileLock lock(long position, long size, boolean shared) throws IOException { - return filechannel.lock(position, size, shared); - } - - @Override - public FileLock tryLock(long position, long size, boolean shared) throws IOException { - return filechannel.tryLock(position, size, shared); - } - - @Override - protected void implCloseChannel() throws IOException { - super.close(); - filechannel.close(); - if (!this.options.contains(StandardOpenOption.READ)) { - sync(); - } - Files.deleteIfExists(tempFile); - } - - /** - * try to sync the temp file with the remote s3 path. - * - * @throws IOException if the tempFile fails to open a newInputStream - */ - protected void sync() throws IOException { - try (InputStream stream = new BufferedInputStream(Files.newInputStream(tempFile))) { - ObjectMetadata metadata = new ObjectMetadata(); - metadata.setContentLength(Files.size(tempFile)); - metadata.setContentType(new Tika().detect(stream, path.getFileName().toString())); - - String bucket = path.getFileStore().name(); - String key = path.getKey(); - path.getFileSystem().getClient().putObject(bucket, key, stream, metadata); - } - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStore.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStore.java deleted file mode 100644 index 31f3852db0b..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStore.java +++ /dev/null @@ -1,146 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import java.io.IOException; -import java.nio.file.FileStore; -import java.nio.file.attribute.FileAttributeView; -import java.nio.file.attribute.FileStoreAttributeView; - -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.Bucket; -import com.amazonaws.services.s3.model.Owner; - -public class S3FileStore extends FileStore implements Comparable { - - private S3FileSystem fileSystem; - private String name; - - public S3FileStore(S3FileSystem s3FileSystem, String name) { - this.fileSystem = s3FileSystem; - this.name = name; - } - - @Override - public String name() { - return name; - } - - @Override - public String type() { - return "S3Bucket"; - } - - @Override - public boolean isReadOnly() { - return false; - } - - @Override - public long getTotalSpace() throws IOException { - return Long.MAX_VALUE; - } - - @Override - public long getUsableSpace() throws IOException { - return Long.MAX_VALUE; - } - - @Override - public long getUnallocatedSpace() throws IOException { - return Long.MAX_VALUE; - } - - @Override - public boolean supportsFileAttributeView(Class type) { - return false; - } - - @Override - public boolean supportsFileAttributeView(String attributeViewName) { - return 
false; - } - - @SuppressWarnings("unchecked") - @Override - public V getFileStoreAttributeView(Class type) { - if (type != S3FileStoreAttributeView.class) - throw new IllegalArgumentException("FileStoreAttributeView of type '" + type.getName() + "' is not supported."); - Bucket buck = getBucket(); - Owner owner = buck.getOwner(); - return (V) new S3FileStoreAttributeView(buck.getCreationDate(), buck.getName(), owner.getId(), owner.getDisplayName()); - } - - @Override - public Object getAttribute(String attribute) throws IOException { - return getFileStoreAttributeView(S3FileStoreAttributeView.class).getAttribute(attribute); - } - - public S3FileSystem getFileSystem() { - return fileSystem; - } - - public Bucket getBucket() { - return getBucket(name); - } - - private Bucket getBucket(String bucketName) { - for (Bucket buck : getClient().listBuckets()) - if (buck.getName().equals(bucketName)) - return buck; - return null; - } - - public S3Path getRootDirectory() { - return new S3Path(fileSystem, "/" + this.name()); - } - - private AmazonS3 getClient() { - return fileSystem.getClient(); - } - - public Owner getOwner() { - Bucket buck = getBucket(); - if (buck != null) - return buck.getOwner(); - return fileSystem.getClient().getS3AccountOwner(); - } - - @Override - public int compareTo(S3FileStore o) { - if (this == o) - return 0; - return o.name().compareTo(name); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((fileSystem == null) ? 0 : fileSystem.hashCode()); - result = prime * result + ((name == null) ? 0 : name.hashCode()); - return result; - } - - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (!(obj instanceof S3FileStore)) - return false; - S3FileStore other = (S3FileStore) obj; - - if (fileSystem == null) { - if (other.fileSystem != null) - return false; - } else if (!fileSystem.equals(other.fileSystem)) - return false; - if (name == null) { - if (other.name != null) - return false; - } else if (!name.equals(other.name)) - return false; - return true; - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStoreAttributeView.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStoreAttributeView.java deleted file mode 100644 index 7f202373e71..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileStoreAttributeView.java +++ /dev/null @@ -1,47 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import java.nio.file.attribute.FileStoreAttributeView; -import java.util.Date; - -public class S3FileStoreAttributeView implements FileStoreAttributeView { - - public static final String ATTRIBUTE_VIEW_NAME = "S3FileStoreAttributeView"; - - private Date creationDate; - private String name; - private String ownerId; - private String ownerDisplayName; - - public static enum AttrID { - creationDate, name, ownerId, ownerDisplayName - } - - public S3FileStoreAttributeView(Date creationDate, String name, String ownerId, String ownerDisplayName) { - this.creationDate = creationDate; - this.name = name; - this.ownerId = ownerId; - this.ownerDisplayName = ownerDisplayName; - } - - @Override - public String name() { - return ATTRIBUTE_VIEW_NAME; - } - - public Object getAttribute(String attribute) { - return getAttribute(AttrID.valueOf(attribute)); - } - - private Object getAttribute(AttrID attrID) { - switch (attrID) { - case creationDate: - 
return creationDate; - case ownerDisplayName: - return ownerDisplayName; - case ownerId: - return ownerId; - default: - return name; - } - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystem.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystem.java deleted file mode 100644 index 4de5c33c0a7..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystem.java +++ /dev/null @@ -1,184 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import java.io.IOException; -import java.net.URI; -import java.nio.file.FileStore; -import java.nio.file.FileSystem; -import java.nio.file.Path; -import java.nio.file.PathMatcher; -import java.nio.file.WatchService; -import java.nio.file.attribute.UserPrincipalLookupService; -import java.util.Set; - -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.Bucket; -import com.google.cloud.storage.contrib.nio.CloudStorageFileSystem; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; - -/** - * S3FileSystem with a concrete client configured and ready to use. - * - * @see AmazonS3 configured by {@link AmazonS3Factory} - */ -public class S3FileSystem extends FileSystem implements Comparable { - - public static S3FileSystem forUri(URI uri, String keyId, String secretKey) { - return S3FileSystemProvider.forUri(uri, keyId, secretKey); - } - - public static S3FileSystem forUri(URI uri) { - return forUri(uri, null, null); - } - - private final S3FileSystemProvider provider; - private final String key; - private final AmazonS3 client; - private int cache; - private final String bucket; - - public S3FileSystem(S3FileSystemProvider provider, String key, AmazonS3 client, String bucket) { - this.provider = provider; - this.key = key; - this.client = client; - this.cache = 60000; // 1 minute cache for the s3Path - this.bucket = bucket; - } - - @Override - public S3FileSystemProvider provider() { - return provider; - } - - public String getKey() { - return key; - } - - public String getBucket() { - return bucket; - } - - public Boolean hasBucket() { - return bucket != null; - } - - @Override - public void close() throws IOException { - this.provider.close(this); - } - - @Override - public boolean isOpen() { - return this.provider.isOpen(this); - } - - @Override - public boolean isReadOnly() { - return false; - } - - @Override - public String getSeparator() { - return S3Path.PATH_SEPARATOR; - } - - @Override - public Iterable getRootDirectories() { - ImmutableList.Builder builder = ImmutableList.builder(); - for (FileStore fileStore : getFileStores()) { - builder.add(((S3FileStore) fileStore).getRootDirectory()); - } - return builder.build(); - } - - @Override - public Iterable getFileStores() { - ImmutableList.Builder builder = ImmutableList.builder(); - for (Bucket bucket : client.listBuckets()) { - builder.add(new S3FileStore(this, bucket.getName())); - } - return builder.build(); - } - - @Override - public Set supportedFileAttributeViews() { - return ImmutableSet.of("basic", "posix"); - } - - @Override - public S3Path getPath(String first, String... 
more) { - - if (hasBucket() && first.startsWith("/")) { - first = "/" + this.bucket + first; - } - if (more.length == 0) { - return new S3Path(this, first); - } - - return new S3Path(this, first, more); - } - - @Override - public PathMatcher getPathMatcher(String syntaxAndPattern) { - throw new UnsupportedOperationException(); - } - - @Override - public UserPrincipalLookupService getUserPrincipalLookupService() { - throw new UnsupportedOperationException(); - } - - @Override - public WatchService newWatchService() throws IOException { - throw new UnsupportedOperationException(); - } - - public AmazonS3 getClient() { - return client; - } - - public String[] key2Parts(String keyParts) { - String[] parts = keyParts.split(S3Path.PATH_SEPARATOR); - String[] split = new String[parts.length]; - int i = 0; - for (String part : parts) { - split[i++] = part; - } - return split; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result; - result = prime * result + ((key == null) ? 0 : key.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (!(obj instanceof S3FileSystem)) - return false; - S3FileSystem other = (S3FileSystem) obj; - if (key == null) { - if (other.key != null) - return false; - } else if (!key.equals(other.key)) - return false; - return true; - } - - @Override - public int compareTo(S3FileSystem o) { - return key.compareTo(o.getKey()); - } - - public int getCache() { - return cache; - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemConfigurationException.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemConfigurationException.java deleted file mode 100644 index 9b7e464df74..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemConfigurationException.java +++ /dev/null @@ -1,9 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -public class S3FileSystemConfigurationException extends RuntimeException { - private static final long serialVersionUID = 1L; - - public S3FileSystemConfigurationException(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java deleted file mode 100644 index dfdd2f03d8b..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3FileSystemProvider.java +++ /dev/null @@ -1,669 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.internal.Constants; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.Bucket; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.S3Object; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; -import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes; -import com.scalableminds.webknossos.datastore.s3fs.attribute.S3PosixFileAttributes; -import com.scalableminds.webknossos.datastore.s3fs.util.AttributesUtils; -import 
com.scalableminds.webknossos.datastore.s3fs.util.Cache; -import com.scalableminds.webknossos.datastore.s3fs.util.S3Utils; -import org.apache.commons.lang3.NotImplementedException; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.URI; -import java.net.URISyntaxException; -import java.nio.channels.FileChannel; -import java.nio.channels.SeekableByteChannel; -import java.nio.file.*; -import java.nio.file.attribute.*; -import java.nio.file.spi.FileSystemProvider; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - -import static com.google.common.collect.Sets.difference; -import static java.lang.String.format; - -/** - * Spec: - *

- * URI: s3://[user@]{bucket}/{key}
- * It's assumed to use the default S3 endpoint (s3.amazonaws.com)
- *
- * FileSystem roots: /{bucket}/
- *
- * Treatment of S3 objects: - If a key ends in "/" it's considered a directory
- * *and* a regular file. Otherwise, it's just a regular file. - It is legal for
- * a key "xyz" and "xyz/" to exist at the same time. The latter is treated as a
- * directory. - If a file "a/b/c" exists but there's no "a" or "a/b/", these are
- * considered "implicit" directories. They can be listed, traversed and deleted.
- *
- * Deviations from FileSystem provider API: - Deleting a file or directory
- * always succeeds, regardless of whether the file/directory existed before the
- * operation was issued i.e. Files.delete() and Files.deleteIfExists() are
- * equivalent.
- *
- * Future versions of this provider might allow for a strict mode that mimics
- * the semantics of the FileSystem provider API on a best effort basis, at an
- * increased processing cost.
- */ -public class S3FileSystemProvider extends FileSystemProvider { - public static final String AMAZON_S3_FACTORY_CLASS = "s3fs_amazon_s3_factory"; - - private static final ConcurrentMap fileSystems = new ConcurrentHashMap<>(); - private static final List PROPS_TO_OVERLOAD = Arrays.asList(AmazonS3Factory.ACCESS_KEY, AmazonS3Factory.SECRET_KEY, AmazonS3Factory.REQUEST_METRIC_COLLECTOR_CLASS, AmazonS3Factory.CONNECTION_TIMEOUT, AmazonS3Factory.MAX_CONNECTIONS, AmazonS3Factory.MAX_ERROR_RETRY, AmazonS3Factory.PROTOCOL, AmazonS3Factory.PROXY_DOMAIN, - AmazonS3Factory.PROXY_HOST, AmazonS3Factory.PROXY_PASSWORD, AmazonS3Factory.PROXY_PORT, AmazonS3Factory.PROXY_USERNAME, AmazonS3Factory.PROXY_WORKSTATION, AmazonS3Factory.SOCKET_SEND_BUFFER_SIZE_HINT, AmazonS3Factory.SOCKET_RECEIVE_BUFFER_SIZE_HINT, AmazonS3Factory.SOCKET_TIMEOUT, - AmazonS3Factory.USER_AGENT, AMAZON_S3_FACTORY_CLASS, AmazonS3Factory.SIGNER_OVERRIDE, AmazonS3Factory.PATH_STYLE_ACCESS); - - private final S3Utils s3Utils = new S3Utils(); - private Cache cache = new Cache(); - - @Override - public String getScheme() { - return "s3"; - } - - public static S3FileSystem forUri(URI uri, String accessKey, String secretKey) { - Properties props = new Properties(); - if (accessKey != null && secretKey != null) { - props.put(AmazonS3Factory.ACCESS_KEY, accessKey); - props.put(AmazonS3Factory.SECRET_KEY, secretKey); - } - URI uriWithNormalizedHost = resolveShortcutHost(uri); - String key = buildFileSystemKey(uriWithNormalizedHost, accessKey); - if (fileSystems.containsKey(key)) { - return fileSystems.get(key); - } - String bucket = S3FileSystemProvider.hostBucketFromUri(uri); - S3FileSystemProvider provider = new S3FileSystemProvider(); - AmazonS3 client = new AmazonS3Factory().getAmazonS3Client(uri, props); - S3FileSystem fileSystem = new S3FileSystem(provider, key, client, bucket); - fileSystems.put(fileSystem.getKey(), fileSystem); - return fileSystem; - } - - @Override - public FileSystem newFileSystem(URI uri, Map env) { - validateUri(uri); - // get properties for the env or properties or system - Properties props = getProperties(uri, env); - validateProperties(props); - // try to get the filesystem by the key - String key = S3FileSystemProvider.buildFileSystemKey(uri, (String) props.get(AmazonS3Factory.ACCESS_KEY)); - if (fileSystems.containsKey(key)) { - throw new FileSystemAlreadyExistsException("File system " + uri.getScheme() + ':' + key + " already exists"); - } - // create the filesystem with the final properties, store and return - S3FileSystem fileSystem = createFileSystem(uri, props); - fileSystems.put(fileSystem.getKey(), fileSystem); - return fileSystem; - } - - private void validateProperties(Properties props) { - Preconditions.checkArgument( - (props.getProperty(AmazonS3Factory.ACCESS_KEY) == null && props.getProperty(AmazonS3Factory.SECRET_KEY) == null) - || (props.getProperty(AmazonS3Factory.ACCESS_KEY) != null && props.getProperty(AmazonS3Factory.SECRET_KEY) != null), "%s and %s should both be provided or should both be omitted", - AmazonS3Factory.ACCESS_KEY, AmazonS3Factory.SECRET_KEY); - } - - private Properties getProperties(URI uri, Map env) { - Properties props = loadAmazonProperties(); - addEnvProperties(props, env); - // and access key and secret key can be override - String userInfo = uri.getUserInfo(); - if (userInfo != null) { - String[] keys = userInfo.split(":"); - props.setProperty(AmazonS3Factory.ACCESS_KEY, keys[0]); - if (keys.length > 1) { - props.setProperty(AmazonS3Factory.SECRET_KEY, keys[1]); - } 
- } - return props; - } - - private static String buildFileSystemKey(URI uri) { - return buildFileSystemKey(uri, null); - } - - /** - * get the file system key represented by: the access key @ endpoint. - * Example: access-key@s3.amazonaws.com - * If uri host is empty then s3.amazonaws.com are used as host - * - * @param uri URI with the endpoint - * @param accessKey access key id - * @return String - */ - protected static String buildFileSystemKey(URI uri, String accessKey) { - // we don`t use uri.getUserInfo and uri.getHost because secret key and access key have special chars - // and dont return the correct strings - String uriString = uri.toString().replace("s3://", ""); - String authority = null; - int authoritySeparator = uriString.indexOf("@"); - - if (authoritySeparator > 0) { - authority = uriString.substring(0, authoritySeparator); - } - - if (authority != null) { - String host = uriString.substring(uriString.indexOf("@") + 1, uriString.length()); - int lastPath = host.indexOf("/"); - if (lastPath > -1) { - host = host.substring(0, lastPath); - } - if (host.length() == 0) { - host = Constants.S3_HOSTNAME; - } - return authority + "@" + host; - } else { - return (accessKey != null ? accessKey + "@" : "") + - (uri.getHost() != null ? uri.getHost() : Constants.S3_HOSTNAME); - } - } - - protected void validateUri(URI uri) { - Preconditions.checkNotNull(uri, "uri is null"); - Preconditions.checkArgument(uri.getScheme().equals(getScheme()), "uri scheme must be 's3': '%s'", uri); - } - - protected void addEnvProperties(Properties props, Map env) { - if (env == null) - env = new HashMap<>(); - for (String key : PROPS_TO_OVERLOAD) { - // but can be overloaded by envs vars - overloadProperty(props, env, key); - } - - for (String key : env.keySet()) { - Object value = env.get(key); - if (!PROPS_TO_OVERLOAD.contains(key)) { - props.put(key, value); - } - } - } - - /** - * try to override the properties props with: - *
- * 1. the map or if not setted:
- * 2. the system property or if not setted:
- * 3. the system vars
- * - * @param props Properties to override - * @param env Map the first option - * @param key String the key - */ - private void overloadProperty(Properties props, Map env, String key) { - boolean overloaded = overloadPropertiesWithEnv(props, env, key); - - if (!overloaded) { - overloaded = overloadPropertiesWithSystemProps(props, key); - } - - if (!overloaded) { - overloadPropertiesWithSystemEnv(props, key); - } - } - - /** - * @return true if the key are overloaded by the map parameter - */ - protected boolean overloadPropertiesWithEnv(Properties props, Map env, String key) { - if (env.get(key) != null && env.get(key) instanceof String) { - props.setProperty(key, (String) env.get(key)); - return true; - } - return false; - } - - /** - * @return true if the key are overloaded by a system property - */ - public boolean overloadPropertiesWithSystemProps(Properties props, String key) { - if (System.getProperty(key) != null) { - props.setProperty(key, System.getProperty(key)); - return true; - } - return false; - } - - /** - * The system envs have preference over the properties files. - * So we overload it - * @param props Properties - * @param key String - * @return true if the key are overloaded by a system property - */ - public boolean overloadPropertiesWithSystemEnv(Properties props, String key) { - if (systemGetEnv(key) != null) { - props.setProperty(key, systemGetEnv(key)); - return true; - } - return false; - } - - /** - * Get the system env with the key param - * @param key String - * @return String or null - */ - public String systemGetEnv(String key) { - return System.getenv(key); - } - - /** - * Get existing filesystem based on a combination of URI and env settings. Create new filesystem otherwise. - * - * @param uri URI of existing, or to be created filesystem. - * @param env environment settings. - * @return new or existing filesystem. - */ - public FileSystem getFileSystem(URI uri, Map env) { - validateUri(uri); - Properties props = getProperties(uri, env); - String key = S3FileSystemProvider.buildFileSystemKey(uri, (String) props.get(AmazonS3Factory.ACCESS_KEY)); // s3fs_access_key is part of the key here. - if (fileSystems.containsKey(key)) - return fileSystems.get(key); - return newFileSystem(uri, env); - } - - @Override - public S3FileSystem getFileSystem(URI uri) { - validateUri(uri); - String key = S3FileSystemProvider.buildFileSystemKey(uri); - if (fileSystems.containsKey(key)) { - return fileSystems.get(key); - } else { - throw new FileSystemNotFoundException("S3 filesystem not yet created. Use newFileSystem() instead"); - } - } - - private S3Path toS3Path(Path path) { - Preconditions.checkArgument(path instanceof S3Path, "path must be an instance of %s", S3Path.class.getName()); - return (S3Path) path; - } - - /** - * Deviation from spec: throws FileSystemNotFoundException if FileSystem - * hasn't yet been initialized. Call newFileSystem() first. - * Need credentials. Maybe set credentials after? how? - * TODO: we can create a new one if the credentials are present by: - * s3://access-key:secret-key@endpoint.com/ - */ - @Override - public Path getPath(URI uri) { - FileSystem fileSystem = getFileSystem(uri); - /** - * TODO: set as a list. 
one s3FileSystem by region - */ - return fileSystem.getPath(uri.getPath()); - } - - @Override - public DirectoryStream newDirectoryStream(Path dir, DirectoryStream.Filter filter) throws IOException { - final S3Path s3Path = toS3Path(dir); - return new DirectoryStream() { - @Override - public void close() throws IOException { - // nothing to do here - } - - @Override - public Iterator iterator() { - return new S3Iterator(s3Path); - } - }; - } - - @Override - public InputStream newInputStream(Path path, OpenOption... options) throws IOException { - S3Path s3Path = toS3Path(path); - String key = s3Path.getKey(); - - Preconditions.checkArgument(options.length == 0, "OpenOptions not yet supported: %s", ImmutableList.copyOf(options)); // TODO - Preconditions.checkArgument(!key.equals(""), "cannot create InputStream for root directory: %s", path); - - try { - S3Object object = s3Path.getFileSystem().getClient().getObject(s3Path.getFileStore().name(), key); - InputStream res = object.getObjectContent(); - - if (res == null) - throw new IOException(String.format("The specified path is a directory: %s", path)); - - return res; - } catch (AmazonS3Exception e) { - if (e.getStatusCode() == 404) - throw new NoSuchFileException(path.toString()); - // otherwise throws a generic IO exception - throw new IOException(String.format("Cannot access file: %s", path), e); - } - } - - @Override - public SeekableByteChannel newByteChannel(Path path, Set options, FileAttribute... attrs) throws IOException { - S3Path s3Path = toS3Path(path); - if (options.isEmpty() || options.contains(StandardOpenOption.READ)) { - if (options.contains(StandardOpenOption.WRITE)) - throw new UnsupportedOperationException( - "Can't read and write one on channel" - ); - return new S3ReadOnlySeekableByteChannel(s3Path, options); - } else { - return new S3SeekableByteChannel(s3Path, options); - } - } - - @Override - public FileChannel newFileChannel(Path path, Set options, FileAttribute... attrs) throws IOException { - S3Path s3Path = toS3Path(path); - return new S3FileChannel(s3Path, options); - } - - /** - * Deviations from spec: Does not perform atomic check-and-create. Since a - * directory is just an S3 object, all directories in the hierarchy are - * created or it already existed. - */ - @Override - public void createDirectory(Path dir, FileAttribute... attrs) throws IOException { - S3Path s3Path = toS3Path(dir); - Preconditions.checkArgument(attrs.length == 0, "attrs not yet supported: %s", ImmutableList.copyOf(attrs)); // TODO - if (exists(s3Path)) - throw new FileAlreadyExistsException(format("target already exists: %s", s3Path)); - // create bucket if necesary - Bucket bucket = s3Path.getFileStore().getBucket(); - String bucketName = s3Path.getFileStore().name(); - if (bucket == null) { - s3Path.getFileSystem().getClient().createBucket(bucketName); - } - // create the object as directory - ObjectMetadata metadata = new ObjectMetadata(); - metadata.setContentLength(0); - String directoryKey = s3Path.getKey().endsWith("/") ? 
s3Path.getKey() : s3Path.getKey() + "/"; - s3Path.getFileSystem().getClient().putObject(bucketName, directoryKey, new ByteArrayInputStream(new byte[0]), metadata); - } - - @Override - public void delete(Path path) throws IOException { - S3Path s3Path = toS3Path(path); - if (Files.notExists(s3Path)) - throw new NoSuchFileException("the path: " + this + " not exists"); - if (Files.isDirectory(s3Path) && Files.newDirectoryStream(s3Path).iterator().hasNext()) - throw new DirectoryNotEmptyException("the path: " + this + " is a directory and is not empty"); - - String key = s3Path.getKey(); - String bucketName = s3Path.getFileStore().name(); - s3Path.getFileSystem().getClient().deleteObject(bucketName, key); - // we delete the two objects (sometimes exists the key '/' and sometimes not) - s3Path.getFileSystem().getClient().deleteObject(bucketName, key + "/"); - } - - @Override - public void copy(Path source, Path target, CopyOption... options) throws IOException { - if (isSameFile(source, target)) - return; - - S3Path s3Source = toS3Path(source); - S3Path s3Target = toS3Path(target); - // TODO: implements support for copying directories - - Preconditions.checkArgument(!Files.isDirectory(source), "copying directories is not yet supported: %s", source); - Preconditions.checkArgument(!Files.isDirectory(target), "copying directories is not yet supported: %s", target); - - ImmutableSet actualOptions = ImmutableSet.copyOf(options); - verifySupportedOptions(EnumSet.of(StandardCopyOption.REPLACE_EXISTING), actualOptions); - - if (exists(s3Target) && !actualOptions.contains(StandardCopyOption.REPLACE_EXISTING)) { - throw new FileAlreadyExistsException(format("target already exists: %s", target)); - } - - String bucketNameOrigin = s3Source.getFileStore().name(); - String keySource = s3Source.getKey(); - String bucketNameTarget = s3Target.getFileStore().name(); - String keyTarget = s3Target.getKey(); - s3Source.getFileSystem() - .getClient().copyObject( - bucketNameOrigin, - keySource, - bucketNameTarget, - keyTarget); - } - - @Override - public void move(Path source, Path target, CopyOption... options) throws IOException { - if (options != null && Arrays.asList(options).contains(StandardCopyOption.ATOMIC_MOVE)) - throw new AtomicMoveNotSupportedException(source.toString(), target.toString(), "Atomic not supported"); - copy(source, target, options); - delete(source); - } - - @Override - public boolean isSameFile(Path path1, Path path2) throws IOException { - return path1.isAbsolute() && path2.isAbsolute() && path1.equals(path2); - } - - @Override - public boolean isHidden(Path path) throws IOException { - return false; - } - - @Override - public FileStore getFileStore(Path path) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void checkAccess(Path path, AccessMode... modes) throws IOException { - S3Path s3Path = toS3Path(path); - Preconditions.checkArgument(s3Path.isAbsolute(), "path must be absolute: %s", s3Path); - if (modes.length == 0) { - if (exists(s3Path)) - return; - throw new NoSuchFileException(toString()); - } - - String key = s3Utils.getS3ObjectSummary(s3Path).getKey(); - S3AccessControlList accessControlList = - new S3AccessControlList(s3Path.getFileStore().name(), key, s3Path.getFileSystem().getClient().getObjectAcl(s3Path.getFileStore().name(), key), s3Path.getFileStore().getOwner()); - - accessControlList.checkAccess(modes); - } - - - @Override - public V getFileAttributeView(Path path, Class type, LinkOption... 
options) { - throw new NotImplementedException(); - } - - @Override - public A readAttributes(Path path, Class type, LinkOption... options) throws IOException { - S3Path s3Path = toS3Path(path); - if (type == BasicFileAttributes.class) { - if (cache.isInTime(s3Path.getFileSystem().getCache(), s3Path.getFileAttributes())) { - A result = type.cast(s3Path.getFileAttributes()); - s3Path.setFileAttributes(null); - return result; - } else { - S3BasicFileAttributes attrs = s3Utils.getS3FileAttributes(s3Path); - s3Path.setFileAttributes(attrs); - return type.cast(attrs); - } - } else if (type == PosixFileAttributes.class) { - if (s3Path.getFileAttributes() instanceof PosixFileAttributes && - cache.isInTime(s3Path.getFileSystem().getCache(), s3Path.getFileAttributes())) { - A result = type.cast(s3Path.getFileAttributes()); - s3Path.setFileAttributes(null); - return result; - } - - S3PosixFileAttributes attrs = s3Utils.getS3PosixFileAttributes(s3Path); - s3Path.setFileAttributes(attrs); - return type.cast(attrs); - } - - throw new UnsupportedOperationException(format("only %s or %s supported", BasicFileAttributes.class, PosixFileAttributes.class)); - } - - @Override - public Map readAttributes(Path path, String attributes, LinkOption... options) throws IOException { - if (attributes == null) { - throw new IllegalArgumentException("Attributes null"); - } - - if (attributes.contains(":") && !attributes.contains("basic:") && !attributes.contains("posix:")) { - throw new UnsupportedOperationException(format("attributes %s are not supported, only basic / posix are supported", attributes)); - } - - if (attributes.equals("*") || attributes.equals("basic:*")) { - BasicFileAttributes attr = readAttributes(path, BasicFileAttributes.class, options); - return AttributesUtils.fileAttributeToMap(attr); - } else if (attributes.equals("posix:*")) { - PosixFileAttributes attr = readAttributes(path, PosixFileAttributes.class, options); - return AttributesUtils.fileAttributeToMap(attr); - } else { - String[] filters = new String[]{attributes}; - if (attributes.contains(",")) { - filters = attributes.split(","); - } - Class filter = BasicFileAttributes.class; - if (attributes.startsWith("posix:")) { - filter = PosixFileAttributes.class; - } - return AttributesUtils.fileAttributeToMap(readAttributes(path, filter, options), filters); - } - } - - @Override - public void setAttribute(Path path, String attribute, Object value, LinkOption... 
options) throws IOException { - throw new UnsupportedOperationException(); - } - - /** - * Create the fileSystem - * - * @param uri URI - * @param props Properties - * @return S3FileSystem never null - */ - private S3FileSystem createFileSystem(URI uri, Properties props) { - URI uriWithNormalizedHost = S3FileSystemProvider.resolveShortcutHost(uri); - String bucket = hostBucketFromUri(uri); - String key = S3FileSystemProvider.buildFileSystemKey(uri, (String) props.get(AmazonS3Factory.ACCESS_KEY)); - return new S3FileSystem(this, key, getAmazonS3(uriWithNormalizedHost, props), bucket); - } - - public static URI resolveShortcutHost(URI uri) { - String host = uri.getHost(); - String newHost = host; - String bucketPrefix = ""; - if (!host.contains(".")) { // assume host is omitted from uri, shortcut form s3://bucket/key - newHost = "s3.amazonaws.com"; - bucketPrefix = "/" + host; - } else if (host.endsWith(".s3.amazonaws.com")) { - newHost = "s3.amazonaws.com"; - bucketPrefix = "/" + host.substring(0, host.length() - ".s3.amazonaws.com".length()); - } - try { - return new URI(uri.getScheme(), uri.getUserInfo(), newHost, uri.getPort(), bucketPrefix + uri.getPath(), uri.getQuery(), uri.getFragment()); - } catch (URISyntaxException e) { - return uri; - } - } - - - private static String hostBucketFromUri(URI uri) { - String host = uri.getHost(); - if (!host.contains(".")) { // assume host is omitted from uri, shortcut form s3://bucket/key - return host; - } else if (host.endsWith(".s3.amazonaws.com")) { - return host.substring(0, host.length() - ".s3.amazonaws.com".length()); - } - return null; - } - - protected AmazonS3 getAmazonS3(URI uri, Properties props) { - return new AmazonS3Factory().getAmazonS3Client(uri, props); - } - - /** - * find /amazon.properties in the classpath - * - * @return Properties amazon.properties - */ - public Properties loadAmazonProperties() { - Properties props = new Properties(); - // http://www.javaworld.com/javaworld/javaqa/2003-06/01-qa-0606-load.html - // http://www.javaworld.com/javaqa/2003-08/01-qa-0808-property.html - try (InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream("amazon.properties")) { - if (in != null) - props.load(in); - } catch (IOException e) { - // If amazon.properties can't be loaded that's ok. 
- } - return props; - } - - // ~~~ - - private void verifySupportedOptions(Set allowedOptions, Set actualOptions) { - Sets.SetView unsupported = difference(actualOptions, allowedOptions); - Preconditions.checkArgument(unsupported.isEmpty(), "the following options are not supported: %s", unsupported); - } - - /** - * check that the paths exists or not - * - * @param path S3Path - * @return true if exists - */ - boolean exists(S3Path path) { - S3Path s3Path = toS3Path(path); - try { - s3Utils.getS3ObjectSummary(s3Path); - return true; - } catch (NoSuchFileException e) { - return false; - } - } - - public void close(S3FileSystem fileSystem) { - if (fileSystem.getKey() != null && fileSystems.containsKey(fileSystem.getKey())) - fileSystems.remove(fileSystem.getKey()); - } - - public boolean isOpen(S3FileSystem s3FileSystem) { - return fileSystems.containsKey(s3FileSystem.getKey()); - } - - /** - * only 4 testing - */ - - protected static ConcurrentMap getFilesystems() { - return fileSystems; - } - - public Cache getCache() { - return cache; - } - - public void setCache(Cache cache) { - this.cache = cache; - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Iterator.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Iterator.java deleted file mode 100644 index 55380755971..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Iterator.java +++ /dev/null @@ -1,192 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Set; - -import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3ObjectSummary; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; -import com.scalableminds.webknossos.datastore.s3fs.util.S3Utils; - -/** - * S3 iterator over folders at first level. - * Future versions of this class should be return the elements - * in a incremental way when the #next() method is called. - */ -public class S3Iterator implements Iterator { - private S3FileSystem fileSystem; - private S3FileStore fileStore; - private String key; - private List items = Lists.newArrayList(); - private Set addedVirtualDirectories = Sets.newHashSet(); - private ObjectListing current; - private int cursor; // index of next element to return - private int size; - private boolean incremental; - - private S3Utils s3Utils = new S3Utils(); - - public S3Iterator(S3Path path) { - this(path, false); - } - - public S3Iterator(S3Path path, boolean incremental) { - this(path.getFileStore(), path.getKey() + (!incremental && !path.getKey().isEmpty() && !path.getKey().endsWith("/") ? 
"/" : ""), incremental); - } - - public S3Iterator(S3FileStore fileStore, String key, boolean incremental) { - ListObjectsRequest listObjectsRequest = buildRequest(fileStore.name(), key, incremental); - - this.fileStore = fileStore; - this.fileSystem = fileStore.getFileSystem(); - this.key = key; - this.current = fileSystem.getClient().listObjects(listObjectsRequest); - this.incremental = incremental; - loadObjects(); - } - - @Override - public boolean hasNext() { - return cursor != size || current.isTruncated(); - } - - @Override - public S3Path next() { - if (cursor == size && current.isTruncated()) { - this.current = fileSystem.getClient().listNextBatchOfObjects(current); - loadObjects(); - } - if (cursor == size) - throw new NoSuchElementException(); - return items.get(cursor++); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - private void loadObjects() { - this.items.clear(); - if (incremental) - parseObjects(); - else - parseObjectListing(key, items, current); - this.size = items.size(); - this.cursor = 0; - } - - private void parseObjects() { - for (final S3ObjectSummary objectSummary : current.getObjectSummaries()) { - final String objectSummaryKey = objectSummary.getKey(); - String[] keyParts = fileSystem.key2Parts(objectSummaryKey); - addParentPaths(keyParts); - S3Path path = new S3Path(fileSystem, "/" + fileStore.name(), keyParts); - if (!items.contains(path)) { - items.add(path); - } - } - } - - private void addParentPaths(String[] keyParts) { - if (keyParts.length <= 1) - return; - String[] subParts = Arrays.copyOf(keyParts, keyParts.length - 1); - List parentPaths = new ArrayList<>(); - while (subParts.length > 0) { - S3Path path = new S3Path(fileSystem, "/" + fileStore.name(), subParts); - String prefix = current.getPrefix(); - - String parentKey = path.getKey(); - if (prefix.length() > parentKey.length() && prefix.contains(parentKey)) - break; - if (items.contains(path) || addedVirtualDirectories.contains(path)) { - subParts = Arrays.copyOf(subParts, subParts.length - 1); - continue; - } - parentPaths.add(path); - addedVirtualDirectories.add(path); - subParts = Arrays.copyOf(subParts, subParts.length - 1); - } - Collections.reverse(parentPaths); - items.addAll(parentPaths); - } - - - /** - * add to the listPath the elements at the same level that s3Path - * - * @param key the uri to parse - * @param listPath List not null list to add - * @param current ObjectListing to walk - */ - private void parseObjectListing(String key, List listPath, ObjectListing current) { - for (String commonPrefix : current.getCommonPrefixes()) { - if (!commonPrefix.equals("/")) { - listPath.add(new S3Path(fileSystem, "/" + fileStore.name(), fileSystem.key2Parts(commonPrefix))); - } - } - // TODO: figure our a way to efficiently preprocess commonPrefix basicFileAttributes - for (final S3ObjectSummary objectSummary : current.getObjectSummaries()) { - final String objectSummaryKey = objectSummary.getKey(); - // we only want the first level - String immediateDescendantKey = getImmediateDescendant(key, objectSummaryKey); - if (immediateDescendantKey != null) { - S3Path descendentPart = new S3Path(fileSystem, "/" + fileStore.name(), fileSystem.key2Parts(immediateDescendantKey)); - descendentPart.setFileAttributes(s3Utils.toS3FileAttributes(objectSummary, descendentPart.getKey())); - if (!listPath.contains(descendentPart)) { - listPath.add(descendentPart); - } - } - } - } - - /** - * The current #buildRequest() get all subdirectories and her content. 
- * This method filter the keyChild and check if is a inmediate - * descendant of the keyParent parameter - * - * @param keyParent String - * @param keyChild String - * @return String parsed - * or null when the keyChild and keyParent are the same and not have to be returned - */ - private String getImmediateDescendant(String keyParent, String keyChild) { - keyParent = deleteExtraPath(keyParent); - keyChild = deleteExtraPath(keyChild); - final int parentLen = keyParent.length(); - final String childWithoutParent = deleteExtraPath(keyChild.substring(parentLen)); - String[] parts = childWithoutParent.split("/"); - if (parts.length > 0 && !parts[0].isEmpty()) - return keyParent + "/" + parts[0]; - return null; - - } - - private String deleteExtraPath(String keyChild) { - if (keyChild.startsWith("/")) - keyChild = keyChild.substring(1); - if (keyChild.endsWith("/")) - keyChild = keyChild.substring(0, keyChild.length() - 1); - return keyChild; - } - - - ListObjectsRequest buildRequest(String bucketName, String key, boolean incremental) { - return buildRequest(bucketName, key, incremental, null); - } - - ListObjectsRequest buildRequest(String bucketName, String key, boolean incremental, Integer maxKeys) { - if (incremental) - return new ListObjectsRequest(bucketName, key, null, null, maxKeys); - return new ListObjectsRequest(bucketName, key, key, "/", maxKeys); - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Path.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Path.java deleted file mode 100644 index 37d1261e670..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3Path.java +++ /dev/null @@ -1,614 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import com.google.common.base.*; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes; - -import java.io.File; -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.net.URI; -import java.net.URL; -import java.net.URLDecoder; -import java.nio.file.*; -import java.util.Iterator; -import java.util.List; - -import static com.google.common.collect.Iterables.*; -import static java.lang.String.format; - -public class S3Path implements Path { - - public static final String PATH_SEPARATOR = "/"; - /** - * S3FileStore which represents the Bucket this path resides in. - */ - private final S3FileStore fileStore; - - /** - * URI not encoded - * Is the key for AmazonS3 - */ - private String uri; - - /** - * actual filesystem - */ - private S3FileSystem fileSystem; - - /** - * S3BasicFileAttributes cache - */ - private S3BasicFileAttributes fileAttributes; - - /** - * Build an S3Path from path segments. '/' are stripped from each segment. - * - * @param fileSystem S3FileSystem - * @param first should be start with a '/' and is the bucket name - * @param more directories and files - */ - public S3Path(S3FileSystem fileSystem, String first, String... more) { - - Preconditions.checkArgument(first != null, "first path must be not null"); - Preconditions.checkArgument(!first.startsWith("//"), "first path doesnt start with '//'. 
Miss bucket"); - // see tests com.upplication.s3fs.Path.EndsWithTest#endsWithRelativeBlankAbsolute() - // Preconditions.checkArgument(!first.isEmpty(), "first path must be not empty"); - - boolean hasBucket = first.startsWith("/"); - - - List pathsURI = Lists - .newArrayList(Splitter.on(PATH_SEPARATOR) - .omitEmptyStrings() - .split(first)); - - if (hasBucket) { // absolute path - - Preconditions.checkArgument(pathsURI.size() >= 1, "path must start with bucket name"); - Preconditions.checkArgument(!pathsURI.get(0).isEmpty(), "bucket name must be not empty"); - String bucket = pathsURI.get(0); - this.fileStore = new S3FileStore(fileSystem, bucket); - // the filestore is not part of the uri - pathsURI.remove(0); - } - else { - // relative uri - this.fileStore = null; - } - - StringBuilder uriBuilder = new StringBuilder(); - if (hasBucket) { - uriBuilder.append(PATH_SEPARATOR); - } - for (String path : pathsURI) { - uriBuilder.append(path + PATH_SEPARATOR); - } - if (more != null) { - for (String path : more) { - uriBuilder.append(path + PATH_SEPARATOR); - } - } - this.uri = normalizeURI(uriBuilder.toString()); - // remove last PATH_SEPARATOR - if (!first.isEmpty() && - // only first param and not ended with PATH_SEPARATOR - ((!first.endsWith(PATH_SEPARATOR) && (more == null || more.length == 0)) - // we have more param and not ended with PATH_SEPARATOR - || more != null && more.length > 0 && !more[more.length-1].endsWith(PATH_SEPARATOR))) { - this.uri = this.uri.substring(0, this.uri.length() - 1); - } - - this.fileSystem = fileSystem; - } - - /** - * Remove duplicated slash - */ - private String normalizeURI(String uri) { - return uri.replace("//", "/"); - } - - public S3FileStore getFileStore() { - return fileStore; - } - - /** - * key for amazon without final slash. - * note: the final slash need to be added to save a directory (Amazon s3 spec) - * - * @return the key for AmazonS3Client - */ - public String getKey() { - - String key = this.uri; - - if (key.startsWith("/")) { - key = key.substring(1, key.length()); - } - - return key; - } - - @Override - public S3FileSystem getFileSystem() { - return this.fileSystem; - } - - @Override - public boolean isAbsolute() { - return fileStore != null; - } - - @Override - public Path getRoot() { - if (isAbsolute()) { - return new S3Path(fileSystem, PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR); - } - - return null; - } - - @Override - public Path getFileName() { - List paths = uriToList(); - if (paths.isEmpty()) { - // get FileName of root directory is null - return null; - } - String filename = paths.get(paths.size()-1); - return new S3Path(fileSystem, filename); - } - - @Override - public Path getParent() { - // bucket is not present in the parts - if (uri.isEmpty()) { - return null; - } - - String newUri = this.uri; - - if (this.uri.endsWith("/")) { - newUri = this.uri.substring(0, this.uri.length()-1); - } - int lastPathSeparatorPosition = newUri.lastIndexOf(PATH_SEPARATOR); - - if (lastPathSeparatorPosition == -1) { - return null; - } - - newUri = uri.substring(0, lastPathSeparatorPosition + 1); - - if (newUri.isEmpty()) - return null; - - String filestore = isAbsolute() ? 
PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR : ""; - - return new S3Path(fileSystem, filestore + newUri); - } - - @Override - public int getNameCount() { - return uriToList().size(); - } - - @Override - public Path getName(int index) { - - List paths = uriToList(); - - if (index < 0 || index >= paths.size()) { - throw new IllegalArgumentException("index out of range"); - } - - String path = paths.get(index); - StringBuilder pathsBuilder = new StringBuilder(); - if (isAbsolute() && index == 0) { - pathsBuilder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR); - } - pathsBuilder.append(path); - - if (index < paths.size() - 1) { - pathsBuilder.append(PATH_SEPARATOR); - } - - // if is the last path, check if end with path separator - if (index == paths.size() - 1 && this.uri.endsWith(PATH_SEPARATOR)) { - pathsBuilder.append(PATH_SEPARATOR); - } - - return new S3Path(fileSystem, pathsBuilder.toString()); - } - - private List uriToList() { - return Splitter.on(PATH_SEPARATOR).omitEmptyStrings().splitToList(this.uri); - } - - /** - * The bucket name not count - */ - @Override - public Path subpath(int beginIndex, int endIndex) { - - List paths = uriToList(); - - if (beginIndex < 0 || endIndex > paths.size()) { - throw new IllegalArgumentException("index out of range"); - } - - List pathSubList = paths.subList(beginIndex, endIndex); - StringBuilder pathsStringBuilder = new StringBuilder(); - - // build path string - - if (this.isAbsolute() && beginIndex == 0) { - pathsStringBuilder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR); - } - for (String path : pathSubList) { - pathsStringBuilder.append(path).append(PATH_SEPARATOR); - } - String pathsResult = pathsStringBuilder.toString(); - // if the uri doesnt have last PATH_SEPARATOR we must remove it. 
- if (endIndex == paths.size() && !this.uri.endsWith(PATH_SEPARATOR)) { - pathsResult = pathsResult.substring(0, pathsResult.length() - 1); - } - - return new S3Path(fileSystem, pathsResult); - } - - @Override - public boolean startsWith(Path other) { - - if (other.getNameCount() > this.getNameCount()) { - return false; - } - - if (!(other instanceof S3Path)) { - return false; - } - - if (this.isAbsolute() && !other.isAbsolute()) { - return false; - } - - S3Path path = (S3Path) other; - - if (this.isAbsolute() && other.isAbsolute() && - !this.fileStore.name().equals(path.fileStore.name())) { - return false; - } - - if (path.uri.isEmpty() && !this.uri.isEmpty()) { - return false; - } - - List pathsOther = path.uriToList(); - List paths = this.uriToList(); - - - for (int i = 0; i < pathsOther.size(); i++) { - if (!pathsOther.get(i).equals(paths.get(i))) { - return false; - } - } - return true; - } - - @Override - public boolean startsWith(String path) { - S3Path other = new S3Path(this.fileSystem, path); - return this.startsWith(other); - } - - @Override - public boolean endsWith(Path other) { - if (other.getNameCount() > this.getNameCount()) { - return false; - } - // empty - if (other.getNameCount() == 0 && this.getNameCount() != 0) { - return false; - } - - if (!(other instanceof S3Path)) { - return false; - } - - S3Path path = (S3Path) other; - - if ((path.getFileStore() != null && !path.getFileStore().equals(this.getFileStore())) || (path.getFileStore() != null && this.getFileStore() == null)) { - return false; - } - - // check subkeys - - List pathsOther = path.uriToList(); - List paths = this.uriToList(); - - - int i = pathsOther.size() - 1; - int j = paths.size() - 1; - for (; i >= 0 && j >= 0; ) { - - if (!pathsOther.get(i).equals(paths.get(j))) { - return false; - } - i--; - j--; - } - return true; - } - - @Override - public boolean endsWith(String other) { - return this.endsWith(new S3Path(this.fileSystem, other)); - } - - @Override - public Path normalize() { - return this; - } - - @Override - public Path resolve(Path other) { - if (other.isAbsolute()) { - Preconditions.checkArgument(other instanceof S3Path, "other must be an instance of %s", S3Path.class.getName()); - return other; - } - - S3Path otherS3Path = (S3Path) other; - StringBuilder pathBuilder = new StringBuilder(); - - if (this.isAbsolute()) { - pathBuilder.append(PATH_SEPARATOR + this.fileStore.name() + PATH_SEPARATOR); - } - pathBuilder.append(this.uri); - if (!otherS3Path.uri.isEmpty()) - pathBuilder.append(PATH_SEPARATOR + otherS3Path.uri); - - return new S3Path(this.fileSystem, pathBuilder.toString()); - } - - @Override - public Path resolve(String other) { - return resolve(new S3Path(this.getFileSystem(), other)); - } - - @Override - public Path resolveSibling(Path other) { - Preconditions.checkArgument(other instanceof S3Path, "other must be an instance of %s", S3Path.class.getName()); - - S3Path s3Path = (S3Path) other; - - Path parent = getParent(); - - if (parent == null || s3Path.isAbsolute()) { - return s3Path; - } - - List othersPaths = s3Path.uriToList(); - - if (othersPaths.isEmpty()) { // other is relative and empty - return parent; - } - - List paths = this.uriToList(); - - StringBuilder pathBuilder = new StringBuilder(); - String lastPath = othersPaths.get(othersPaths.size() - 1); - if (isAbsolute()) { - pathBuilder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR); - } - for (String path : concat(paths.subList(0, paths.size() - 1), othersPaths)) { - pathBuilder.append(path); - if 
(!lastPath.equals(path) || s3Path.uri.endsWith(PATH_SEPARATOR)) { - pathBuilder.append(PATH_SEPARATOR); - } - } - - return new S3Path(fileSystem, pathBuilder.toString()); - } - - @Override - public Path resolveSibling(String other) { - return resolveSibling(new S3Path(this.getFileSystem(), other)); - } - - @Override - public Path relativize(Path other) { - Preconditions.checkArgument(other instanceof S3Path, "other must be an instance of %s", S3Path.class.getName()); - S3Path s3Path = (S3Path) other; - - if (this.equals(other)) { - return new S3Path(this.getFileSystem(), ""); - } - - Preconditions.checkArgument(isAbsolute(), "Path is already relative: %s", this); - Preconditions.checkArgument(s3Path.isAbsolute(), "Cannot relativize against a relative path: %s", s3Path); - Preconditions.checkArgument(fileStore.equals(s3Path.getFileStore()), "Cannot relativize paths with different buckets: '%s', '%s'", this, other); - // Preconditions.checkArgument(parts.size() <= s3Path.parts.size(), "Cannot relativize against a parent path: '%s', '%s'", this, other); - - String uriPath = decode(URI.create(encode(this.uri)).relativize(URI.create(encode(s3Path.uri)))); - return new S3Path(fileSystem, uriPath); - } - - /** - * Examples: - * - * Relative: - * -------- - * NO use fileSystem and not used fileStore. - * - path/file - * - * Absolute: - * -------- - * Use the fileSystem to get the host and the filestore to get the first path (in the future the filestore can be attached to the host) - * http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html - * - s3://AMAZONACCESSKEY@s3.amazonaws.com/bucket/path/file - * - s3://AMAZONACCESSKEY@bucket.s3.amazonaws.com/path/file - * - s3://s3-aws-region.amazonaws.com/bucket/path/file - * - * @return URI never null - */ - @Override - public URI toUri() { - - String uri = encode(this.uri); - // absolute - if (this.isAbsolute()) { - StringBuilder builder = new StringBuilder(); - builder.append(fileSystem.getKey()); - builder.append(PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR); - builder.append(uri); - return URI.create("s3://" + normalizeURI(builder.toString())); - } - else { - return URI.create(uri); - } - } - - /** - * Get the url for the s3Path. - * - * The url represents a Uniform Resource - * Locator, a pointer to a "resource" on the World - * Wide Web. - * - * All S3Path has a URL if is absolute - * - * @see com.amazonaws.services.s3.AmazonS3#getUrl(String, String) - * @see S3Path#toUri() for unique resource identifier - * @return URL or null if is not absoulte - */ - public URL toURL() { - if (!this.isAbsolute()) - return null; - - return this.getFileSystem().getClient().getUrl(this.fileStore.name(), this.getKey()); - } - - @Override - public Path toAbsolutePath() { - if (isAbsolute()) { - return this; - } - - throw new IllegalStateException(format("Relative path cannot be made absolute: %s", this)); - } - - @Override - public Path toRealPath(LinkOption... options) throws IOException { - return toAbsolutePath(); - } - - @Override - public File toFile() { - throw new UnsupportedOperationException(); - } - - @Override - public WatchKey register(WatchService watcher, WatchEvent.Kind[] events, WatchEvent.Modifier... modifiers) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public WatchKey register(WatchService watcher, WatchEvent.Kind... 
events) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public Iterator iterator() { - - ImmutableList.Builder builder = ImmutableList.builder(); - - if (isAbsolute()) { - builder.add(new S3Path(fileSystem, PATH_SEPARATOR + fileStore.name() + PATH_SEPARATOR)); - } - - List paths = uriToList(); - - if (uriToList().isEmpty()) - return builder.build().iterator(); - - String lastPath = paths.get(paths.size() - 1); - - for (String path : uriToList()) { - String pathFinal = path + PATH_SEPARATOR; - if (path.equals(lastPath) && !lastPath.endsWith(PATH_SEPARATOR)) { - pathFinal = pathFinal.substring(0, pathFinal.length() - 1); - } - builder.add(new S3Path(fileSystem, pathFinal)); - } - - return builder.build().iterator(); - } - - @Override - public int compareTo(Path other) { - return toString().compareTo(other.toString()); - } - - @Override - public String toString() { - return toUri().toString(); - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - - S3Path path = (S3Path) o; - if (fileStore != null ? !fileStore.equals(path.fileStore) : path.fileStore != null) - return false; - if (!uri.equals(path.uri)) - return false; - return true; - } - - @Override - public int hashCode() { - int result = fileStore != null ? fileStore.name().hashCode() : 0; - result = 31 * result + uri.hashCode(); - return result; - } - - /** - * Encode special URI characters for path. - * @param uri String the uri path - * @return String - */ - private String encode(String uri) { - // remove special case URI starting with // - uri = uri.replace("//", "/"); - uri = uri.replaceAll(" ", "%20"); - return uri; - } - - /** - * Decode uri special characters - * - * @param uri URI mandatory - * @return String decoded - */ - private String decode(URI uri) { - try { - return URLDecoder.decode(uri.toString(), "UTF-8"); - } - catch (UnsupportedEncodingException e) { - throw new IllegalStateException("Error decoding key: " + this.uri, e); - } - } - - public S3BasicFileAttributes getFileAttributes() { - return fileAttributes; - } - - public void setFileAttributes(S3BasicFileAttributes fileAttributes) { - this.fileAttributes = fileAttributes; - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3ReadOnlySeekableByteChannel.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3ReadOnlySeekableByteChannel.java deleted file mode 100644 index d9f0bae4788..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3ReadOnlySeekableByteChannel.java +++ /dev/null @@ -1,158 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import com.amazonaws.services.s3.model.GetObjectRequest; -import com.amazonaws.services.s3.model.S3Object; -import java.io.BufferedInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.nio.channels.Channels; -import java.nio.channels.NonWritableChannelException; -import java.nio.channels.ReadableByteChannel; -import java.nio.channels.SeekableByteChannel; -import java.nio.file.OpenOption; -import java.nio.file.ReadOnlyFileSystemException; -import java.nio.file.StandardOpenOption; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; - -import static java.lang.String.format; - -public class S3ReadOnlySeekableByteChannel implements SeekableByteChannel { - - private static final int DEFAULT_BUFFER_SIZE = 64000; 
- - private S3Path path; - private Set options; - private long length; - private ExtBufferedInputStream bufferedStream; - private ReadableByteChannel rbc; - private long position = 0; - - /** - * Open or creates a file, returning a seekable byte channel - * - * @param path the path open or create - * @param options options specifying how the file is opened - * @throws IOException if an I/O error occurs - */ - public S3ReadOnlySeekableByteChannel(S3Path path, Set options) throws IOException { - this.path = path; - this.options = Collections.unmodifiableSet(new HashSet<>(options)); - - String key = path.getKey(); - //boolean exists = path.getFileSystem().provider().exists(path); - - //if (!exists) { - // throw new NoSuchFileException(format("target not exists: %s", path)); - //} else if ( - if ( - this.options.contains(StandardOpenOption.WRITE) || - this.options.contains(StandardOpenOption.CREATE) || - this.options.contains(StandardOpenOption.CREATE_NEW) || - this.options.contains(StandardOpenOption.APPEND) - ) { - throw new ReadOnlyFileSystemException(); - } - - this.length = path - .getFileSystem() - .getClient() - .getObjectMetadata( - path - .getFileStore() - .name(), - key - ) - .getContentLength(); - openStreamAt(0); - } - - private void openStreamAt(long position) throws IOException { - if (rbc != null) { - rbc.close(); - } - - GetObjectRequest rangeObjectRequest = - new GetObjectRequest( - path.getFileStore().name(), - path.getKey() - ) - .withRange(position); - - S3Object s3Object = - path - .getFileSystem() - .getClient() - .getObject(rangeObjectRequest); - - bufferedStream = - new ExtBufferedInputStream( - s3Object.getObjectContent(), - DEFAULT_BUFFER_SIZE - ); - - rbc = Channels.newChannel(bufferedStream); - this.position = position; - } - - public boolean isOpen() { - return rbc.isOpen(); - } - - public long position() { return position; } - - public SeekableByteChannel position(long targetPosition) - throws IOException - { - long offset = targetPosition - position(); - if (offset > 0 && offset < bufferedStream.getBytesInBufferAvailable()) { - long skipped = bufferedStream.skip(offset); - if (skipped != offset) { - // shouldn't happen since we are within the buffer - throw new IOException("Could not seek to " + targetPosition); - } - position += offset; - } else if (offset != 0) { - openStreamAt(targetPosition); - } - return this; - } - - public int read(ByteBuffer dst) throws IOException { - int n = rbc.read(dst); - if (n > 0) { - position += n; - } - return n; - } - - public SeekableByteChannel truncate(long size) { - throw new NonWritableChannelException(); - } - - public int write (ByteBuffer src) { - throw new NonWritableChannelException(); - } - - public long size() { - return length; - } - - public void close() throws IOException { - rbc.close(); - } - - private class ExtBufferedInputStream extends BufferedInputStream { - private ExtBufferedInputStream(final InputStream inputStream, final int size) { - super(inputStream, size); - } - - /** Returns the number of bytes that can be read from the buffer without reading more into the buffer. 
*/ - int getBytesInBufferAvailable() { - if (this.count == this.pos) return 0; - else return this.buf.length - this.pos; - } - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java deleted file mode 100644 index 4d4ff0afbb9..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/S3SeekableByteChannel.java +++ /dev/null @@ -1,145 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs; - -import static java.lang.String.format; - -import java.io.BufferedInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.nio.channels.SeekableByteChannel; -import java.nio.file.*; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; - -import org.apache.tika.Tika; - -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.S3Object; - -public class S3SeekableByteChannel implements SeekableByteChannel { - - private S3Path path; - private Set options; - private SeekableByteChannel seekable; - private Path tempFile; - - /** - * Open or creates a file, returning a seekable byte channel - * - * @param path the path open or create - * @param options options specifying how the file is opened - * @throws IOException if an I/O error occurs - */ - public S3SeekableByteChannel(S3Path path, Set options) throws IOException { - this.path = path; - this.options = Collections.unmodifiableSet(new HashSet<>(options)); - String key = path.getKey(); - boolean exists = path.getFileSystem().provider().exists(path); - - if (exists && this.options.contains(StandardOpenOption.CREATE_NEW)) - throw new FileAlreadyExistsException(format("target already exists: %s", path)); - else if (!exists && !this.options.contains(StandardOpenOption.CREATE_NEW) && - !this.options.contains(StandardOpenOption.CREATE)) - throw new NoSuchFileException(format("target not exists: %s", path)); - - tempFile = Files.createTempFile("temp-s3-", key.replaceAll("/", "_")); - boolean removeTempFile = true; - try { - if (exists) { - try (S3Object object = path.getFileSystem() - .getClient() - .getObject(path.getFileStore().getBucket().getName(), key)) { - Files.copy(object.getObjectContent(), tempFile, StandardCopyOption.REPLACE_EXISTING); - } - } - - Set seekOptions = new HashSet<>(this.options); - seekOptions.remove(StandardOpenOption.CREATE_NEW); - seekable = Files.newByteChannel(tempFile, seekOptions); - removeTempFile = false; - } finally { - if (removeTempFile) { - Files.deleteIfExists(tempFile); - } - } - } - - @Override - public boolean isOpen() { - return seekable.isOpen(); - } - - @Override - public void close() throws IOException { - try { - if (!seekable.isOpen()) - return; - - seekable.close(); - - if (options.contains(StandardOpenOption.DELETE_ON_CLOSE)) { - path.getFileSystem().provider().delete(path); - return; - } - - if (options.contains(StandardOpenOption.READ) && options.size() == 1) { - return; - } - - sync(); - - } finally { - Files.deleteIfExists(tempFile); - } - } - - /** - * try to sync the temp file with the remote s3 path. 
- * - * @throws IOException if the tempFile fails to open a newInputStream - */ - protected void sync() throws IOException { - try (InputStream stream = new BufferedInputStream(Files.newInputStream(tempFile))) { - ObjectMetadata metadata = new ObjectMetadata(); - metadata.setContentLength(Files.size(tempFile)); - if (path.getFileName() != null) { - metadata.setContentType(new Tika().detect(stream, path.getFileName().toString())); - } - - String bucket = path.getFileStore().name(); - String key = path.getKey(); - path.getFileSystem().getClient().putObject(bucket, key, stream, metadata); - } - } - - @Override - public int write(ByteBuffer src) throws IOException { - return seekable.write(src); - } - - @Override - public SeekableByteChannel truncate(long size) throws IOException { - return seekable.truncate(size); - } - - @Override - public long size() throws IOException { - return seekable.size(); - } - - @Override - public int read(ByteBuffer dst) throws IOException { - return seekable.read(dst); - } - - @Override - public SeekableByteChannel position(long newPosition) throws IOException { - return seekable.position(newPosition); - } - - @Override - public long position() throws IOException { - return seekable.position(); - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributeView.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributeView.java deleted file mode 100644 index 80d0b62240e..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributeView.java +++ /dev/null @@ -1,33 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs.attribute; - -import com.scalableminds.webknossos.datastore.s3fs.S3Path; - -import java.io.IOException; -import java.nio.file.attribute.BasicFileAttributeView; -import java.nio.file.attribute.BasicFileAttributes; -import java.nio.file.attribute.FileTime; - - -public class S3BasicFileAttributeView implements BasicFileAttributeView { - - private S3Path s3Path; - - public S3BasicFileAttributeView(S3Path s3Path) { - this.s3Path = s3Path; - } - - @Override - public String name() { - return "basic"; - } - - @Override - public BasicFileAttributes readAttributes() throws IOException { - return s3Path.getFileSystem().provider().readAttributes(s3Path, BasicFileAttributes.class); - } - - @Override - public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTime createTime) throws IOException { - // not implemented - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributes.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributes.java deleted file mode 100644 index 99318debfb8..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3BasicFileAttributes.java +++ /dev/null @@ -1,85 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs.attribute; - -import static java.lang.String.format; - -import java.nio.file.attribute.BasicFileAttributes; -import java.nio.file.attribute.FileTime; - -public class S3BasicFileAttributes implements BasicFileAttributes { - private final FileTime lastModifiedTime; - private final long size; - private final boolean directory; - private final boolean regularFile; - private final String key; - private long cacheCreated; - - public S3BasicFileAttributes(String key, FileTime lastModifiedTime, long 
size, boolean isDirectory, boolean isRegularFile) { - this.key = key; - this.lastModifiedTime = lastModifiedTime; - this.size = size; - this.directory = isDirectory; - this.regularFile = isRegularFile; - - this.cacheCreated = System.currentTimeMillis(); - } - - @Override - public FileTime lastModifiedTime() { - return lastModifiedTime; - } - - @Override - public FileTime lastAccessTime() { - return lastModifiedTime; - } - - @Override - public FileTime creationTime() { - return lastModifiedTime; - } - - @Override - public boolean isRegularFile() { - return regularFile; - } - - @Override - public boolean isDirectory() { - return directory; - } - - @Override - public boolean isSymbolicLink() { - return false; - } - - @Override - public boolean isOther() { - return false; - } - - @Override - public long size() { - return size; - } - - @Override - public Object fileKey() { - return key; - } - - @Override - public String toString() { - return format("[%s: lastModified=%s, size=%s, isDirectory=%s, isRegularFile=%s]", key, lastModifiedTime, size, directory, regularFile); - } - - public long getCacheCreated() { - return cacheCreated; - } - - // for testing - - public void setCacheCreated(long time) { - this.cacheCreated = time; - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributeView.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributeView.java deleted file mode 100644 index ae25ad465e8..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributeView.java +++ /dev/null @@ -1,61 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs.attribute; - -import com.scalableminds.webknossos.datastore.s3fs.S3Path; - -import java.io.IOException; -import java.nio.file.attribute.*; -import java.util.Set; - - -public class S3PosixFileAttributeView implements PosixFileAttributeView { - - private S3Path s3Path; - private PosixFileAttributes posixFileAttributes; - - public S3PosixFileAttributeView(S3Path s3Path) { - this.s3Path = s3Path; - } - - @Override - public String name() { - return "posix"; - } - - @Override - public PosixFileAttributes readAttributes() throws IOException { - return read(); - } - - @Override - public UserPrincipal getOwner() throws IOException { - return read().owner(); - } - - @Override - public void setOwner(UserPrincipal owner) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void setPermissions(Set perms) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void setGroup(GroupPrincipal group) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public void setTimes(FileTime lastModifiedTime, FileTime lastAccessTime, FileTime createTime) throws IOException { - // not implemented - } - - public PosixFileAttributes read() throws IOException { - if (posixFileAttributes == null) { - posixFileAttributes = s3Path.getFileSystem() - .provider().readAttributes(s3Path, PosixFileAttributes.class); - } - return posixFileAttributes; - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributes.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributes.java deleted file mode 100644 index d00d332c0d7..00000000000 --- 
a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3PosixFileAttributes.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs.attribute; - -import java.nio.file.attribute.*; -import java.util.Set; - -import static java.lang.String.format; - -public class S3PosixFileAttributes extends S3BasicFileAttributes implements PosixFileAttributes { - - private UserPrincipal userPrincipal; - private GroupPrincipal groupPrincipal; - private Set posixFilePermissions; - - public S3PosixFileAttributes(String key, FileTime lastModifiedTime, long size, boolean isDirectory, boolean isRegularFile, UserPrincipal userPrincipal, GroupPrincipal groupPrincipal, Set posixFilePermissionSet) { - - super(key, lastModifiedTime, size, isDirectory, isRegularFile); - - this.userPrincipal = userPrincipal; - this.groupPrincipal = groupPrincipal; - this.posixFilePermissions = posixFilePermissionSet; - } - - @Override - public UserPrincipal owner() { - return this.userPrincipal; - } - - @Override - public GroupPrincipal group() { - return this.groupPrincipal; - } - - @Override - public Set permissions() { - return this.posixFilePermissions; - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3UserPrincipal.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3UserPrincipal.java deleted file mode 100644 index 5a0bacc178c..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/attribute/S3UserPrincipal.java +++ /dev/null @@ -1,17 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs.attribute; - -import java.nio.file.attribute.UserPrincipal; - -public class S3UserPrincipal implements UserPrincipal { - - private String name; - - public S3UserPrincipal(String name) { - this.name = name; - } - - @Override - public String getName() { - return name; - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AnonymousAWSCredentialsProvider.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AnonymousAWSCredentialsProvider.java deleted file mode 100644 index 513431a8ff6..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AnonymousAWSCredentialsProvider.java +++ /dev/null @@ -1,18 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs.util; - -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.AnonymousAWSCredentials; - -public class AnonymousAWSCredentialsProvider implements AWSCredentialsProvider { - - @Override - public AWSCredentials getCredentials() { - return new AnonymousAWSCredentials(); - } - - @Override - public void refresh() { - - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AttributesUtils.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AttributesUtils.java deleted file mode 100644 index 12a4983023c..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/AttributesUtils.java +++ /dev/null @@ -1,100 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs.util; - - -import java.nio.file.attribute.BasicFileAttributes; -import java.nio.file.attribute.PosixFileAttributes; -import java.util.HashMap; -import java.util.Map; - -/** - * Utilities to help transforming BasicFileAttributes to Map - */ -public abstract class AttributesUtils { - - /** - * 
Given a BasicFileAttributes not null then return a Map - * with the keys as the fields of the BasicFileAttributes or PosixFileAttributes and the values - * with the content of the fields - * - * @param attr BasicFileAttributes - * @return Map String Object never null - */ - public static Map fileAttributeToMap(BasicFileAttributes attr) { - Map result = new HashMap<>(); - result.put("creationTime", attr.creationTime()); - result.put("fileKey", attr.fileKey()); - result.put("isDirectory", attr.isDirectory()); - result.put("isOther", attr.isOther()); - result.put("isRegularFile", attr.isRegularFile()); - result.put("isSymbolicLink", attr.isSymbolicLink()); - result.put("lastAccessTime", attr.lastAccessTime()); - result.put("lastModifiedTime", attr.lastModifiedTime()); - result.put("size", attr.size()); - - if (attr instanceof PosixFileAttributes) { - PosixFileAttributes posixAttr = (PosixFileAttributes) attr; - result.put("permissions", posixAttr.permissions()); - result.put("owner", posixAttr.owner()); - result.put("group", posixAttr.group()); - } - - return result; - } - - /** - * transform the java.nio.file.attribute.BasicFileAttributes to Map filtering by the keys - * given in the filters param - * - * @param attr BasicFileAttributes not null to tranform to map - * @param filters String[] filters - * @return Map String Object with the same keys as the filters - */ - public static Map fileAttributeToMap(BasicFileAttributes attr, String[] filters) { - Map result = new HashMap<>(); - - for (String filter : filters) { - filter = filter.replace("basic:", ""); - filter = filter.replace("posix:", ""); - switch (filter) { - case "creationTime": - result.put("creationTime", attr.creationTime()); - break; - case "fileKey": - result.put("fileKey", attr.fileKey()); - break; - case "isDirectory": - result.put("isDirectory", attr.isDirectory()); - break; - case "isOther": - result.put("isOther", attr.isOther()); - break; - case "isRegularFile": - result.put("isRegularFile", attr.isRegularFile()); - break; - case "isSymbolicLink": - result.put("isSymbolicLink", attr.isSymbolicLink()); - break; - case "lastAccessTime": - result.put("lastAccessTime", attr.lastAccessTime()); - break; - case "lastModifiedTime": - result.put("lastModifiedTime", attr.lastModifiedTime()); - break; - case "size": - result.put("size", attr.size()); - break; - case "permissions": - result.put("permissions", ((PosixFileAttributes)attr).permissions()); - break; - case "group": - result.put("group", ((PosixFileAttributes)attr).group()); - break; - case "owner": - result.put("owner", ((PosixFileAttributes)attr).owner()); - break; - } - } - - return result; - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/Cache.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/Cache.java deleted file mode 100644 index 7ca13ab00a0..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/Cache.java +++ /dev/null @@ -1,29 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs.util; - -import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes; - -public class Cache { - - /** - * check if the cache of the S3FileAttributes is still valid - * - * @param cache int cache time of the fileAttributes in milliseconds - * @param fileAttributes S3FileAttributes to check if is still valid, can be null - * @return true or false, if cache are -1 and fileAttributes are not null then always return true - */ - public boolean 
isInTime(int cache, S3BasicFileAttributes fileAttributes) { - if (fileAttributes == null) { - return false; - } - - if (cache == -1) { - return true; - } - - return getCurrentTime() - cache <= fileAttributes.getCacheCreated(); - } - - public long getCurrentTime() { - return System.currentTimeMillis(); - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/IOUtils.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/IOUtils.java deleted file mode 100644 index 65d15be9e53..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/IOUtils.java +++ /dev/null @@ -1,32 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs.util; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; - -/** - * Utilities for streams - */ -public abstract class IOUtils { - /** - * get the stream content and return as a byte array - * - * @param is InputStream - * @return byte array - * @throws IOException if the stream is closed - */ - public static byte[] toByteArray(InputStream is) throws IOException { - ByteArrayOutputStream buffer = new ByteArrayOutputStream(); - - int nRead; - byte[] data = new byte[16384]; - - while ((nRead = is.read(data, 0, data.length)) != -1) { - buffer.write(data, 0, nRead); - } - - buffer.flush(); - - return buffer.toByteArray(); - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/S3Utils.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/S3Utils.java deleted file mode 100644 index 0716563ad8b..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/s3fs/util/S3Utils.java +++ /dev/null @@ -1,198 +0,0 @@ -package com.scalableminds.webknossos.datastore.s3fs.util; - -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.*; -import com.google.common.collect.Sets; -import com.scalableminds.webknossos.datastore.s3fs.attribute.S3BasicFileAttributes; -import com.scalableminds.webknossos.datastore.s3fs.attribute.S3PosixFileAttributes; -import com.scalableminds.webknossos.datastore.s3fs.attribute.S3UserPrincipal; -import com.scalableminds.webknossos.datastore.s3fs.S3Path; - -import java.nio.file.NoSuchFileException; -import java.nio.file.attribute.FileTime; -import java.nio.file.attribute.PosixFilePermission; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.TimeUnit; - -/** - * Utilities to work with Amazon S3 Objects. - */ -public class S3Utils { - - /** - * Get the {@link S3ObjectSummary} that represent this Path or her first child if this path not exists - * - * @param s3Path {@link S3Path} - * @return {@link S3ObjectSummary} - * @throws NoSuchFileException if not found the path and any child - */ - public S3ObjectSummary getS3ObjectSummary(S3Path s3Path) throws NoSuchFileException { - String key = s3Path.getKey(); - String bucketName = s3Path.getFileStore().name(); - AmazonS3 client = s3Path.getFileSystem().getClient(); - // try to find the element with the current key (maybe with end slash or maybe not.) 
- try { - ObjectMetadata metadata = client.getObjectMetadata(bucketName, key); - S3ObjectSummary result = new S3ObjectSummary(); - result.setBucketName(bucketName); - result.setETag(metadata.getETag()); - result.setKey(key); - result.setLastModified(metadata.getLastModified()); - result.setSize(metadata.getContentLength()); - try { - AccessControlList objectAcl = client.getObjectAcl(bucketName, key); - result.setOwner(objectAcl.getOwner()); - } catch (AmazonS3Exception e) { - // If we don't have permission to view the ACL, that's fine, we can leave `owner` empty - if (e.getStatusCode() != 403) - throw e; - } - return result; - } catch (AmazonS3Exception e) { - if (e.getStatusCode() != 404) - throw e; - } - - // if not found (404 err) with the original key. - // try to find the elment as a directory. - try { - // is a virtual directory - ListObjectsRequest request = new ListObjectsRequest(); - request.setBucketName(bucketName); - String keyFolder = key; - if (!keyFolder.endsWith("/")) { - keyFolder += "/"; - } - request.setPrefix(keyFolder); - request.setMaxKeys(1); - ObjectListing current = client.listObjects(request); - if (!current.getObjectSummaries().isEmpty()) - return current.getObjectSummaries().get(0); - } catch (Exception e) { - // - } - throw new NoSuchFileException(bucketName + S3Path.PATH_SEPARATOR + key); - } - - /** - * getS3FileAttributes for the s3Path - * - * @param s3Path S3Path mandatory not null - * @return S3FileAttributes never null - */ - public S3BasicFileAttributes getS3FileAttributes(S3Path s3Path) throws NoSuchFileException { - S3ObjectSummary objectSummary = getS3ObjectSummary(s3Path); - return toS3FileAttributes(objectSummary, s3Path.getKey()); - } - - /** - * get the S3PosixFileAttributes for a S3Path - * @param s3Path Path mandatory not null - * @return S3PosixFileAttributes never null - * @throws NoSuchFileException if the Path doesnt exists - */ - public S3PosixFileAttributes getS3PosixFileAttributes(S3Path s3Path) throws NoSuchFileException { - S3ObjectSummary objectSummary = getS3ObjectSummary(s3Path); - - String key = s3Path.getKey(); - String bucketName = s3Path.getFileStore().name(); - - S3BasicFileAttributes attrs = toS3FileAttributes(objectSummary, key); - S3UserPrincipal userPrincipal = null; - Set permissions = null; - - if (!attrs.isDirectory()) { - AmazonS3 client = s3Path.getFileSystem().getClient(); - AccessControlList acl = client.getObjectAcl(bucketName, key); - Owner owner = acl.getOwner(); - - userPrincipal = new S3UserPrincipal(owner.getId() + ":" + owner.getDisplayName()); - permissions = toPosixFilePermissions(acl.getGrantsAsList()); - } - - return new S3PosixFileAttributes((String)attrs.fileKey(), attrs.lastModifiedTime(), - attrs.size(), attrs.isDirectory(), attrs.isRegularFile(), userPrincipal, null, permissions); - } - - - /** - * transform com.amazonaws.services.s3.model.Grant to java.nio.file.attribute.PosixFilePermission - * @see #toPosixFilePermission(Permission) - * @param grants Set grants mandatory, must be not null - * @return Set PosixFilePermission never null - */ - public Set toPosixFilePermissions(List grants) { - Set filePermissions = new HashSet<>(); - for (Grant grant : grants) { - filePermissions.addAll(toPosixFilePermission(grant.getPermission())); - } - - return filePermissions; - } - - /** - * transform a com.amazonaws.services.s3.model.Permission to a java.nio.file.attribute.PosixFilePermission - * We use the follow rules: - * - transform only to the Owner permission, S3 doesnt have concepts like owner, group or 
other so we map only to owner. - * - ACP is a special permission: WriteAcp are mapped to Owner execute permission and ReadAcp are mapped to owner read - * @param permission Permission to map, mandatory must be not null - * @return Set PosixFilePermission never null - */ - public Set toPosixFilePermission(Permission permission){ - switch (permission) { - case FullControl: - return Sets.newHashSet(PosixFilePermission.OWNER_EXECUTE, - PosixFilePermission.OWNER_READ, - PosixFilePermission.OWNER_WRITE); - case Write: - return Sets.newHashSet(PosixFilePermission.OWNER_WRITE); - case Read: - return Sets.newHashSet(PosixFilePermission.OWNER_READ); - case ReadAcp: - return Sets.newHashSet(PosixFilePermission.OWNER_READ); - case WriteAcp: - return Sets.newHashSet(PosixFilePermission.OWNER_EXECUTE); - } - throw new IllegalStateException("Unknown Permission: " + permission); - } - - /** - * transform S3ObjectSummary to S3FileAttributes - * - * @param objectSummary S3ObjectSummary mandatory not null, the real objectSummary with - * exactly the same key than the key param or the immediate descendant - * if it is a virtual directory - * @param key String the real key that can be exactly equal than the objectSummary or - * @return S3FileAttributes - */ - public S3BasicFileAttributes toS3FileAttributes(S3ObjectSummary objectSummary, String key) { - // parse the data to BasicFileAttributes. - FileTime lastModifiedTime = null; - if (objectSummary.getLastModified() != null) { - lastModifiedTime = FileTime.from(objectSummary.getLastModified().getTime(), TimeUnit.MILLISECONDS); - } - long size = objectSummary.getSize(); - boolean directory = false; - boolean regularFile = false; - String resolvedKey = objectSummary.getKey(); - // check if is a directory and exists the key of this directory at amazon s3 - if (key.endsWith("/") && resolvedKey.equals(key) || - resolvedKey.equals(key + "/")) { - directory = true; - } else if (key.isEmpty()) { // is a bucket (no key) - directory = true; - resolvedKey = "/"; - } else if (!resolvedKey.equals(key) && resolvedKey.startsWith(key)) { // is a directory but not exists at amazon s3 - directory = true; - // no metadata, we fake one - size = 0; - // delete extra part - resolvedKey = key + "/"; - } else { - regularFile = true; - } - return new S3BasicFileAttributes(resolvedKey, lastModifiedTime, size, directory, regularFile); - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/DataVaultsHolder.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/DataVaultsHolder.scala new file mode 100644 index 00000000000..12f2968f14c --- /dev/null +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/DataVaultsHolder.scala @@ -0,0 +1,56 @@ +package com.scalableminds.webknossos.datastore.storage + +import com.scalableminds.util.cache.AlfuFoxCache +import com.scalableminds.util.tools.Fox +import com.scalableminds.webknossos.datastore.datavault.{ + DataVault, + GoogleCloudDataVault, + HttpsDataVault, + S3DataVault, + VaultPath +} +import com.typesafe.scalalogging.LazyLogging +import net.liftweb.common.Full + +import scala.concurrent.ExecutionContext + +object DataVaultsHolder extends LazyLogging { + + val schemeS3: String = "s3" + val schemeHttps: String = "https" + val schemeHttp: String = "http" + val schemeGS: String = "gs" + + private val vaultCache: AlfuFoxCache[RemoteSourceDescriptor, DataVault] = + AlfuFoxCache(maxEntries = 100) + + def isSupportedRemoteScheme(uriScheme: String): Boolean = + 
List(schemeS3, schemeHttps, schemeHttp, schemeGS).contains(uriScheme) + + def getVaultPath(remoteSourceDescriptor: RemoteSourceDescriptor)(implicit ec: ExecutionContext): Fox[VaultPath] = + for { + vault <- vaultCache.getOrLoad(remoteSourceDescriptor, create) + } yield new VaultPath(remoteSourceDescriptor.uri, vault) + + private def create(remoteSource: RemoteSourceDescriptor)(implicit ec: ExecutionContext): Fox[DataVault] = { + val scheme = remoteSource.uri.getScheme + try { + val fs: DataVault = if (scheme == schemeGS) { + GoogleCloudDataVault.create(remoteSource) + } else if (scheme == schemeS3) { + S3DataVault.create(remoteSource) + } else if (scheme == schemeHttps || scheme == schemeHttp) { + HttpsDataVault.create(remoteSource) + } else { + throw new Exception(s"Unknown file system scheme $scheme") + } + logger.info(s"Successfully created file system for ${remoteSource.uri.toString}") + Fox.successful(fs) + } catch { + case e: Exception => + val msg = s"get file system errored for ${remoteSource.uri.toString}:" + logger.error(msg, e) + Fox.failure(msg, Full(e)) + } + } +} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemService.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemService.scala index 22028faed25..e79ec31d094 100644 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemService.scala +++ b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemService.scala @@ -2,11 +2,10 @@ package com.scalableminds.webknossos.datastore.storage import com.scalableminds.util.tools.Fox import com.scalableminds.webknossos.datastore.dataformats.MagLocator +import com.scalableminds.webknossos.datastore.datavault.VaultPath import com.scalableminds.webknossos.datastore.services.DSRemoteWebKnossosClient -import net.liftweb.util.Helpers.tryo import java.net.URI -import java.nio.file.Path import javax.inject.Inject import scala.concurrent.ExecutionContext @@ -14,12 +13,11 @@ case class RemoteSourceDescriptor(uri: URI, credential: Option[FileSystemCredent class FileSystemService @Inject()(dSRemoteWebKnossosClient: DSRemoteWebKnossosClient) { - def remotePathFor(magLocator: MagLocator)(implicit ec: ExecutionContext): Fox[Path] = + def remotePathFor(magLocator: MagLocator)(implicit ec: ExecutionContext): Fox[VaultPath] = for { credentialBox <- credentialFor(magLocator: MagLocator).futureBox remoteSource = RemoteSourceDescriptor(magLocator.uri, credentialBox.toOption) - fileSystem <- FileSystemsHolder.getOrCreate(remoteSource) ?~> "remoteFileSystem.setup.failed" - remotePath <- tryo(fileSystem.getPath(FileSystemsHolder.pathFromUri(remoteSource.uri))) + remotePath <- DataVaultsHolder.getVaultPath(remoteSource) ?~> "remoteFileSystem.setup.failed" } yield remotePath private def credentialFor(magLocator: MagLocator)(implicit ec: ExecutionContext): Fox[FileSystemCredential] = diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala deleted file mode 100644 index fa98e5c00af..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/FileSystemsHolder.scala +++ /dev/null @@ -1,100 +0,0 @@ -package com.scalableminds.webknossos.datastore.storage - -import com.google.auth.oauth2.ServiceAccountCredentials -import com.google.cloud.storage.StorageOptions -import 
com.google.cloud.storage.contrib.nio.{CloudStorageConfiguration, CloudStorageFileSystem} -import com.scalableminds.util.cache.AlfuFoxCache -import com.scalableminds.util.tools.Fox -import com.scalableminds.webknossos.datastore.s3fs.{S3FileSystem, S3FileSystemProvider} -import com.scalableminds.webknossos.datastore.storage.httpsfilesystem.HttpsFileSystem -import com.typesafe.scalalogging.LazyLogging -import net.liftweb.common.Full - -import java.io.ByteArrayInputStream -import java.net.URI -import java.nio.file.FileSystem -import scala.concurrent.ExecutionContext - -object FileSystemsHolder extends LazyLogging { - - val schemeS3: String = "s3" - val schemeHttps: String = "https" - val schemeHttp: String = "http" - val schemeGS: String = "gs" - - private val fileSystemsCache: AlfuFoxCache[RemoteSourceDescriptor, FileSystem] = - AlfuFoxCache(maxEntries = 100) - - def isSupportedRemoteScheme(uriScheme: String): Boolean = - List(schemeS3, schemeHttps, schemeHttp, schemeGS).contains(uriScheme) - - def getOrCreate(remoteSource: RemoteSourceDescriptor)(implicit ec: ExecutionContext): Fox[FileSystem] = - fileSystemsCache.getOrLoad(remoteSource, create) - - def create(remoteSource: RemoteSourceDescriptor)(implicit ec: ExecutionContext): Fox[FileSystem] = { - val scheme = remoteSource.uri.getScheme - try { - val fs: FileSystem = if (scheme == schemeGS) { - getGoogleCloudStorageFileSystem(remoteSource) - } else if (scheme == schemeS3) { - getAmazonS3FileSystem(remoteSource) - } else if (scheme == schemeHttps || scheme == schemeHttp) { - getHttpsFileSystem(remoteSource) - } else { - throw new Exception(s"Unknown file system scheme $scheme") - } - logger.info(s"Successfully created file system for ${remoteSource.uri.toString}") - Fox.successful(fs) - } catch { - case e: Exception => - val msg = s"get file system errored for ${remoteSource.uri.toString}:" - logger.error(msg, e) - Fox.failure(msg, Full(e)) - } - } - - private def getGoogleCloudStorageFileSystem(remoteSource: RemoteSourceDescriptor): FileSystem = { - val bucketName = remoteSource.uri.getHost - val storageOptions = buildCredentialStorageOptions(remoteSource) - CloudStorageFileSystem.forBucket(bucketName, CloudStorageConfiguration.DEFAULT, storageOptions) - } - - private def getAmazonS3FileSystem(remoteSource: RemoteSourceDescriptor): FileSystem = - remoteSource.credential match { - case Some(credential: S3AccessKeyCredential) => - S3FileSystem.forUri(remoteSource.uri, credential.accessKeyId, credential.secretAccessKey) - case Some(credential: LegacyFileSystemCredential) => - S3FileSystem.forUri(remoteSource.uri, - credential.toS3AccessKey.accessKeyId, - credential.toS3AccessKey.secretAccessKey) - case _ => - S3FileSystem.forUri(remoteSource.uri) - } - - private def getHttpsFileSystem(remoteSource: RemoteSourceDescriptor): FileSystem = - remoteSource.credential match { - case Some(credential: HttpBasicAuthCredential) => - HttpsFileSystem.forUri(remoteSource.uri, Some(credential)) - case Some(credential: LegacyFileSystemCredential) => - HttpsFileSystem.forUri(remoteSource.uri, Some(credential.toBasicAuth)) - case _ => - HttpsFileSystem.forUri(remoteSource.uri) - } - - private def buildCredentialStorageOptions(remoteSource: RemoteSourceDescriptor) = - remoteSource.credential match { - case Some(credential: GoogleServiceAccountCredential) => - StorageOptions - .newBuilder() - .setCredentials( - ServiceAccountCredentials.fromStream(new ByteArrayInputStream(credential.secretJson.toString.getBytes))) - .build() - case _ => 
StorageOptions.newBuilder().build() - } - - def pathFromUri(uri: URI): String = - if (uri.getScheme == schemeS3) { - // There are several s3 uri styles. Normalize, then drop bucket name (and handle leading slash) - "/" + S3FileSystemProvider.resolveShortcutHost(uri).getPath.split("/").drop(2).mkString("/") - } else uri.getPath -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/ByteChannelOptions.java b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/ByteChannelOptions.java deleted file mode 100644 index 35db01a9a57..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/ByteChannelOptions.java +++ /dev/null @@ -1,10 +0,0 @@ -package com.scalableminds.webknossos.datastore.storage.httpsfilesystem; - -import java.nio.file.OpenOption; - -public enum ByteChannelOptions implements OpenOption { - RANGE; - - ByteChannelOptions() { - } -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsFileSystem.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsFileSystem.scala deleted file mode 100644 index 39c4faa3594..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsFileSystem.scala +++ /dev/null @@ -1,58 +0,0 @@ -package com.scalableminds.webknossos.datastore.storage.httpsfilesystem - -import com.scalableminds.webknossos.datastore.storage.HttpBasicAuthCredential - -import java.net.URI -import java.nio.file._ -import java.nio.file.attribute.UserPrincipalLookupService -import java.nio.file.spi.FileSystemProvider -import java.{lang, util} - -object HttpsFileSystem { - def forUri(uri: URI, credential: Option[HttpBasicAuthCredential] = None): HttpsFileSystem = { - - val key = HttpsFileSystemProvider.fileSystemKey(uri, None) - if (HttpsFileSystemProvider.fileSystems.containsKey(key)) { - HttpsFileSystemProvider.fileSystems.get(key) - } else { - val fileSystem = new HttpsFileSystem(new HttpsFileSystemProvider, uri, credential) - HttpsFileSystemProvider.fileSystems.put(key, fileSystem) - fileSystem - } - } - -} - -class HttpsFileSystem(provider: HttpsFileSystemProvider, - uri: URI, - basicAuthCredential: Option[HttpBasicAuthCredential] = None) - extends FileSystem { - override def provider(): FileSystemProvider = provider - - override def close(): Unit = ??? - - override def isOpen: Boolean = ??? - - override def isReadOnly: Boolean = ??? - - override def getSeparator: String = ??? - - override def getRootDirectories: lang.Iterable[Path] = ??? - - override def getFileStores: lang.Iterable[FileStore] = ??? - - override def supportedFileAttributeViews(): util.Set[String] = ??? - - override def getPath(s: String, strings: String*): Path = - new HttpsPath(uri.resolve(s), fileSystem = this) - - override def getPathMatcher(s: String): PathMatcher = ??? - - override def getUserPrincipalLookupService: UserPrincipalLookupService = ??? - - override def newWatchService(): WatchService = ??? 
- - def getKey: String = HttpsFileSystemProvider.fileSystemKey(uri, basicAuthCredential) - - def getBasicAuthCredential: Option[HttpBasicAuthCredential] = basicAuthCredential -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsFileSystemProvider.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsFileSystemProvider.scala deleted file mode 100644 index 7f347d30d83..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsFileSystemProvider.scala +++ /dev/null @@ -1,65 +0,0 @@ -package com.scalableminds.webknossos.datastore.storage.httpsfilesystem - -import java.net.URI -import java.nio.channels.SeekableByteChannel -import java.nio.file.attribute.{BasicFileAttributes, FileAttribute, FileAttributeView} -import java.nio.file.spi.FileSystemProvider -import java.nio.file._ -import java.util -import java.util.concurrent.ConcurrentHashMap -import com.scalableminds.webknossos.datastore.storage.HttpBasicAuthCredential -import com.typesafe.scalalogging.LazyLogging - -object HttpsFileSystemProvider { - val fileSystems: ConcurrentHashMap[String, HttpsFileSystem] = new ConcurrentHashMap[String, HttpsFileSystem] - - def fileSystemKey(uri: URI, basicAuthCredentials: Option[HttpBasicAuthCredential]): String = { - val uriWithUser = basicAuthCredentials.map { c => - new URI(uri.getScheme, c.username, uri.getHost, uri.getPort, uri.getPath, uri.getQuery, uri.getFragment) - }.getOrElse(uri) - uriWithUser.toString - } -} - -class HttpsFileSystemProvider extends FileSystemProvider with LazyLogging { - - override def getScheme: String = "https" // Note that it will also handle http if called with one - - override def newFileSystem(uri: URI, env: util.Map[String, _]): FileSystem = ??? - - override def getFileSystem(uri: URI): FileSystem = ??? - - override def getPath(uri: URI): Path = ??? - - override def newByteChannel(path: Path, - openOptions: util.Set[_ <: OpenOption], - fileAttributes: FileAttribute[_]*): SeekableByteChannel = - new HttpsSeekableByteChannel(path.asInstanceOf[HttpsPath], openOptions) - - override def newDirectoryStream(path: Path, filter: DirectoryStream.Filter[_ >: Path]): DirectoryStream[Path] = ??? - - override def createDirectory(path: Path, fileAttributes: FileAttribute[_]*): Unit = ??? - - override def delete(path: Path): Unit = ??? - - override def copy(path: Path, path1: Path, copyOptions: CopyOption*): Unit = ??? - - override def move(path: Path, path1: Path, copyOptions: CopyOption*): Unit = ??? - - override def isSameFile(path: Path, path1: Path): Boolean = ??? - - override def isHidden(path: Path): Boolean = ??? - - override def getFileStore(path: Path): FileStore = ??? - - override def checkAccess(path: Path, accessModes: AccessMode*): Unit = ??? - - override def getFileAttributeView[V <: FileAttributeView](path: Path, aClass: Class[V], linkOptions: LinkOption*): V = - ??? - - override def readAttributes[A <: BasicFileAttributes](path: Path, aClass: Class[A], linkOptions: LinkOption*): A = ??? - - override def readAttributes(path: Path, s: String, linkOptions: LinkOption*): util.Map[String, AnyRef] = ??? - - override def setAttribute(path: Path, s: String, o: Any, linkOptions: LinkOption*): Unit = ??? 
-} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsPath.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsPath.scala deleted file mode 100644 index 3019cf87ed6..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsPath.scala +++ /dev/null @@ -1,73 +0,0 @@ -package com.scalableminds.webknossos.datastore.storage.httpsfilesystem - -import com.scalableminds.webknossos.datastore.storage.HttpBasicAuthCredential - -import java.io.File -import java.net.URI -import java.nio.file.{FileSystem, LinkOption, Path, Paths, WatchEvent, WatchKey, WatchService} - -class HttpsPath(uri: URI, fileSystem: HttpsFileSystem) extends Path { - override def getFileSystem: FileSystem = fileSystem - - override def isAbsolute: Boolean = ??? - - override def getRoot: Path = ??? - - override def getFileName: Path = Paths.get(uri.toString.split("/").last) - override def getParent: Path = - new HttpsPath(if (uri.getPath.endsWith("/")) uri.resolve("..") - else uri.resolve("."), - fileSystem) - - override def getNameCount: Int = ??? - - override def getName(i: Int): Path = ??? - - override def subpath(i: Int, i1: Int): Path = ??? - - override def startsWith(path: Path): Boolean = ??? - - override def startsWith(s: String): Boolean = ??? - - override def endsWith(path: Path): Boolean = ??? - - override def endsWith(s: String): Boolean = uri.getPath.endsWith(s) - - override def normalize(): Path = ??? - - override def resolve(path: Path): Path = - // trailing slash added to avoid resolving in the parent level. Note that consecutive slashes are removed - new HttpsPath(URI.create(uri.toString + "/").resolve(path.toString), fileSystem) - - override def resolve(s: String): Path = new HttpsPath(URI.create(uri.toString + "/").resolve(s), fileSystem) - - override def resolveSibling(path: Path): Path = ??? - - override def resolveSibling(s: String): Path = ??? - - override def relativize(path: Path): Path = ??? - - override def toUri: URI = uri - - override def toAbsolutePath: Path = ??? - - override def toRealPath(linkOptions: LinkOption*): Path = ??? - - override def toFile: File = ??? - - override def register(watchService: WatchService, - kinds: Array[WatchEvent.Kind[_]], - modifiers: WatchEvent.Modifier*): WatchKey = ??? - - override def register(watchService: WatchService, kinds: WatchEvent.Kind[_]*): WatchKey = ??? - - override def iterator(): java.util.Iterator[Path] = ??? - - override def compareTo(path: Path): Int = ??? 
- - def getKey: String = uri.toString - - override def toString: String = uri.toString - - def getBasicAuthCredential: Option[HttpBasicAuthCredential] = fileSystem.getBasicAuthCredential -} diff --git a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsSeekableByteChannel.scala b/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsSeekableByteChannel.scala deleted file mode 100644 index aa857da3ba5..00000000000 --- a/webknossos-datastore/app/com/scalableminds/webknossos/datastore/storage/httpsfilesystem/HttpsSeekableByteChannel.scala +++ /dev/null @@ -1,108 +0,0 @@ -package com.scalableminds.webknossos.datastore.storage.httpsfilesystem - -import java.net.URI -import java.nio.ByteBuffer -import java.nio.channels.SeekableByteChannel -import java.nio.file.OpenOption -import java.util - -import sttp.client3._ -import sttp.model.Uri - -import scala.concurrent.duration.DurationInt - -class HttpsSeekableByteChannel(path: HttpsPath, openOptions: util.Set[_ <: OpenOption]) extends SeekableByteChannel { - private var _position: Long = 0L - - private val uri: URI = path.toUri - - private val connectionTimeout = 5 seconds - private val readTimeout = 1 minute - - private var _isOpen: Boolean = true - - private lazy val backend = HttpClientSyncBackend(options = SttpBackendOptions.connectionTimeout(connectionTimeout)) - private val rangeRequestsActive = openOptions.contains(ByteChannelOptions.RANGE) - - private lazy val authenticatedRequest = path.getBasicAuthCredential.map { credential => - basicRequest.auth.basic(credential.username, credential.password) - }.getOrElse( - basicRequest - ) - .readTimeout(readTimeout) - - private lazy val headRequest: Request[Either[String, String], Any] = - authenticatedRequest.head(Uri(uri)) - - private def getHeaderInformation = { - val response: Identity[Response[Either[String, String]]] = backend.send(headRequest) - val acceptsPartialRequests = response.headers.find(_.is("Accept-Ranges")).exists(_.value == "bytes") - val dataSize = response.headers.find(_.is("Content-Length")).map(_.value.toInt).getOrElse(0) - (acceptsPartialRequests, dataSize) - } - - private lazy val headerInfos = getHeaderInformation - private lazy val acceptsPartialRequests: Boolean = headerInfos._1 - - private def getDataRequest: Request[Either[String, Array[Byte]], Any] = - authenticatedRequest.get(Uri(uri)).response(asByteArray) - - private def getRangeRequest(bufferSize: Int): Request[Either[String, Array[Byte]], Any] = - getDataRequest - .header("Range", s"bytes=${_position}-${(bufferSize + _position).min(size()) - 1}") - .response(asByteArray) - - private def getResponse: Identity[Response[Either[String, Array[Byte]]]] = { - val request: Request[Either[String, Array[Byte]], Any] = getDataRequest - backend.send(request) - } - - private def getResponseForRangeRequest(size: Int) = { - val request = getRangeRequest(size) - backend.send(request) - } - - private lazy val fullResponse: Identity[Response[Either[String, Array[Byte]]]] = getResponse - - override def read(byteBuffer: ByteBuffer): Int = { - val response = - if (rangeRequestsActive && acceptsPartialRequests) getResponseForRangeRequest(byteBuffer.limit()) - else fullResponse - if (!response.isSuccess) { - throw new Exception(s"Https read failed for uri $uri: Response was not successful ${response.statusText}") - } - response.body match { - case Left(e) => throw new Exception(s"Https read failed for uri $uri: $e: Response empty") - case Right(bytes) => - 
val availableBytes = size() - position - val bytesToCopy = availableBytes.min(byteBuffer.limit) - byteBuffer.put(bytes.slice(_position.toInt, (_position + bytesToCopy).toInt)) - _position += bytesToCopy - bytesToCopy.toInt - } - - } - - override def write(byteBuffer: ByteBuffer): Int = ??? - - override def position(): Long = _position - - override def position(l: Long): SeekableByteChannel = { - this._position = l - this - } - - override def size(): Long = - if (rangeRequestsActive) { - headerInfos._2 - } else { - fullResponse.body.getOrElse(Array()).length - } - - override def truncate(l: Long): SeekableByteChannel = ??? - - override def isOpen: Boolean = _isOpen - - override def close(): Unit = - _isOpen = false -}
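
For reference alongside the removed FileSystemsHolder: the following is a minimal, self-contained sketch of the credentialed Google Cloud Storage setup it performed, assuming the service-account key JSON is already available as a plain string (the RemoteSourceDescriptor and credential wrapper types from the codebase are deliberately left out).

    import java.io.ByteArrayInputStream
    import java.nio.charset.StandardCharsets
    import com.google.auth.oauth2.ServiceAccountCredentials
    import com.google.cloud.storage.StorageOptions
    import com.google.cloud.storage.contrib.nio.{CloudStorageConfiguration, CloudStorageFileSystem}

    object GcsFileSystemSetup {
      // secretJson: contents of a service-account key file; None falls back to anonymous access.
      def forBucket(bucketName: String, secretJson: Option[String]): java.nio.file.FileSystem = {
        val storageOptions = secretJson match {
          case Some(json) =>
            StorageOptions
              .newBuilder()
              .setCredentials(
                ServiceAccountCredentials.fromStream(new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8))))
              .build()
          case None => StorageOptions.newBuilder().build()
        }
        // google-cloud-nio mounts gs://<bucketName> as a java.nio.file.FileSystem
        CloudStorageFileSystem.forBucket(bucketName, CloudStorageConfiguration.DEFAULT, storageOptions)
      }
    }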
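
The S3 branch of the removed pathFromUri dropped the bucket segment from an already-normalized path-style URI. The bucket-stripping step on its own, with an illustrative bucket and key (the shortcut-host normalization done by the removed S3FileSystemProvider is not reproduced here):

    import java.net.URI

    object S3ObjectKey {
      // "/my-bucket/zarr/dataset/.zarray".split("/") yields
      // Array("", "my-bucket", "zarr", "dataset", ".zarray"); dropping the first two
      // elements removes the empty leading segment and the bucket name.
      def objectKeyPath(normalizedS3Uri: URI): String =
        "/" + normalizedS3Uri.getPath.split("/").drop(2).mkString("/")
    }

    // e.g. S3ObjectKey.objectKeyPath(new URI("s3://s3.amazonaws.com/my-bucket/zarr/dataset/.zarray"))
    //      returns "/zarr/dataset/.zarray"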
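
The trailing slash appended in the removed HttpsPath.resolve matters because java.net.URI resolution otherwise replaces the last path segment rather than descending into it. A small demonstration against an illustrative URL:

    import java.net.URI

    object ResolveDemo extends App {
      val base = URI.create("https://example.com/data/color")
      println(base.resolve("1"))                            // https://example.com/data/1       (sibling of "color")
      println(URI.create(base.toString + "/").resolve("1")) // https://example.com/data/color/1 (child of "color")
    }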
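
The removed HttpsSeekableByteChannel combined a HEAD request (to inspect Accept-Ranges and Content-Length) with a ranged GET via sttp. A condensed sketch of that pattern, with basic auth and timeouts omitted and a simplistic fallback to a full read when the server does not advertise byte-range support:

    import java.net.URI
    import sttp.client3._
    import sttp.model.Uri

    object RangedHttpRead {
      private val backend = HttpClientSyncBackend()

      // Reads `length` bytes starting at `offset`. The full-download-plus-slice fallback
      // is only reasonable for small objects.
      def readRange(uri: URI, offset: Long, length: Long): Either[String, Array[Byte]] = {
        val target = Uri(uri)
        val head = basicRequest.head(target).send(backend)
        val acceptsRanges = head.headers.exists(h => h.is("Accept-Ranges") && h.value == "bytes")
        val get =
          if (acceptsRanges)
            basicRequest.get(target).header("Range", s"bytes=$offset-${offset + length - 1}").response(asByteArray)
          else
            basicRequest.get(target).response(asByteArray)
        get.send(backend).body.map { bytes =>
          if (acceptsRanges) bytes else bytes.slice(offset.toInt, (offset + length).toInt)
        }
      }
    }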