diff --git a/.github/workflows/auto-cherry-pick.yml b/.github/workflows/auto-cherry-pick.yml index 0a264e83526..efea2823706 100644 --- a/.github/workflows/auto-cherry-pick.yml +++ b/.github/workflows/auto-cherry-pick.yml @@ -7,60 +7,42 @@ on: types: ["closed"] jobs: - cherry_pick_branch_0_5: - runs-on: ubuntu-latest - name: Cherry pick into branch_0.5 - if: ${{ contains(github.event.pull_request.labels.*.name, 'branch-0.5') && github.event.pull_request.merged == true }} - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - name: Cherry pick into branch-0.5 - uses: carloscastrojumo/github-cherry-pick-action@v1.0.9 - with: - branch: branch-0.5 - labels: | - cherry-pick - reviewers: | - jerryshao - - cherry_pick_branch_0_6: + cherry_pick_branch_0_7: runs-on: ubuntu-latest - name: Cherry pick into branch_0.6 - if: ${{ contains(github.event.pull_request.labels.*.name, 'branch-0.6') && github.event.pull_request.merged == true }} + name: Cherry pick into branch_0.7 + if: ${{ contains(github.event.pull_request.labels.*.name, 'branch-0.7') && github.event.pull_request.merged == true }} steps: - name: Checkout uses: actions/checkout@v2 with: fetch-depth: 0 - - name: Cherry pick into branch-0.6 + - name: Cherry pick into branch-0.7 uses: carloscastrojumo/github-cherry-pick-action@v1.0.9 with: - branch: branch-0.6 + branch: branch-0.7 labels: | cherry-pick reviewers: | jerryshao - - cherry_pick_branch_0_7: + cherry_pick_branch_0_8: runs-on: ubuntu-latest - name: Cherry pick into branch_0.7 - if: ${{ contains(github.event.pull_request.labels.*.name, 'branch-0.7') && github.event.pull_request.merged == true }} + name: Cherry pick into branch_0.8 + if: ${{ contains(github.event.pull_request.labels.*.name, 'branch-0.8') && github.event.pull_request.merged == true }} steps: - name: Checkout uses: actions/checkout@v2 with: fetch-depth: 0 - - name: Cherry pick into branch-0.7 + - name: Cherry pick into branch-0.8 uses: 
carloscastrojumo/github-cherry-pick-action@v1.0.9 with: - branch: branch-0.7 + branch: branch-0.8 labels: | cherry-pick reviewers: | jerryshao + FANNG1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 1958163f863..085c508ad39 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -39,6 +39,7 @@ jobs: - meta/** - scripts/** - server/** + - bundles/** - server-common/** - build.gradle.kts - gradle.properties diff --git a/.github/workflows/gvfs-fuse-build-test.yml b/.github/workflows/gvfs-fuse-build-test.yml new file mode 100644 index 00000000000..4af01d82da3 --- /dev/null +++ b/.github/workflows/gvfs-fuse-build-test.yml @@ -0,0 +1,89 @@ +name: Build gvfs-fuse and testing + +# Controls when the workflow will run +on: + push: + branches: [ "main", "branch-*" ] + pull_request: + branches: [ "main", "branch-*" ] + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + changes: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: dorny/paths-filter@v2 + id: filter + with: + filters: | + source_changes: + - .github/** + - api/** + - bin/** + - catalogs/hadoop/** + - clients/filesystem-fuse/** + - common/** + - conf/** + - core/** + - dev/** + - gradle/** + - meta/** + - scripts/** + - server/** + - server-common/** + - build.gradle.kts + - gradle.properties + - gradlew + - setting.gradle.kts + outputs: + source_changes: ${{ steps.filter.outputs.source_changes }} + + # Build for AMD64 architecture + Gvfs-Build: + needs: changes + if: needs.changes.outputs.source_changes == 'true' + runs-on: ubuntu-latest + timeout-minutes: 60 + strategy: + matrix: + architecture: [linux/amd64] + java-version: [ 17 ] + env: + PLATFORM: ${{ matrix.architecture }} + steps: + - uses: 
actions/checkout@v3 + + - uses: actions/setup-java@v4 + with: + java-version: ${{ matrix.java-version }} + distribution: 'temurin' + cache: 'gradle' + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Check required command + run: | + dev/ci/check_commands.sh + + - name: Build and test Gravitino + run: | + ./gradlew :clients:filesystem-fuse:build -PenableFuse=true + + - name: Free up disk space + run: | + dev/ci/util_free_space.sh + + - name: Upload tests reports + uses: actions/upload-artifact@v3 + if: ${{ (failure() && steps.integrationTest.outcome == 'failure') || contains(github.event.pull_request.labels.*.name, 'upload log') }} + with: + name: Gvfs-fuse integrate-test-reports-${{ matrix.java-version }} + path: | + clients/filesystem-fuse/build/test/log/*.log + diff --git a/api/src/main/java/org/apache/gravitino/MetadataObjects.java b/api/src/main/java/org/apache/gravitino/MetadataObjects.java index 74da23c10ea..557ccdefc49 100644 --- a/api/src/main/java/org/apache/gravitino/MetadataObjects.java +++ b/api/src/main/java/org/apache/gravitino/MetadataObjects.java @@ -21,6 +21,7 @@ import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.base.Splitter; +import java.util.Collections; import java.util.List; import javax.annotation.Nullable; import org.apache.commons.lang3.StringUtils; @@ -151,6 +152,9 @@ public static MetadataObject parse(String fullName, MetadataObject.Type type) { StringUtils.isNotBlank(fullName), "Metadata object full name cannot be blank"); List parts = DOT_SPLITTER.splitToList(fullName); + if (type == MetadataObject.Type.ROLE) { + return MetadataObjects.of(Collections.singletonList(fullName), MetadataObject.Type.ROLE); + } return MetadataObjects.of(parts, type); } diff --git a/api/src/main/java/org/apache/gravitino/credential/SupportsCredentials.java b/api/src/main/java/org/apache/gravitino/credential/SupportsCredentials.java index 678172c422a..b2569fe393d 100644 --- 
a/api/src/main/java/org/apache/gravitino/credential/SupportsCredentials.java +++ b/api/src/main/java/org/apache/gravitino/credential/SupportsCredentials.java @@ -41,7 +41,7 @@ public interface SupportsCredentials { * org.apache.gravitino.file.Fileset}, {@link org.apache.gravitino.rel.Table}. There will be * at most one credential for one credential type. */ - Credential[] getCredentials() throws NoSuchCredentialException; + Credential[] getCredentials(); /** * Retrieves an {@link Credential} object based on the specified credential type. diff --git a/api/src/test/java/org/apache/gravitino/TestMetadataObjects.java b/api/src/test/java/org/apache/gravitino/TestMetadataObjects.java index bab5c5833fe..f792220e185 100644 --- a/api/src/test/java/org/apache/gravitino/TestMetadataObjects.java +++ b/api/src/test/java/org/apache/gravitino/TestMetadataObjects.java @@ -84,4 +84,19 @@ public void testColumnObject() { MetadataObjects.of( Lists.newArrayList("catalog", "schema", "table"), MetadataObject.Type.COLUMN)); } + + @Test + public void testRoleObject() { + MetadataObject roleObject = MetadataObjects.of(null, "role.test", MetadataObject.Type.ROLE); + Assertions.assertEquals("role.test", roleObject.fullName()); + + MetadataObject roleObject1 = MetadataObjects.of(null, "role", MetadataObject.Type.ROLE); + Assertions.assertEquals("role", roleObject1.fullName()); + + MetadataObject roleObject2 = MetadataObjects.parse("role.test", MetadataObject.Type.ROLE); + Assertions.assertEquals("role.test", roleObject2.fullName()); + + MetadataObject roleObject3 = MetadataObjects.parse("role", MetadataObject.Type.ROLE); + Assertions.assertEquals("role", roleObject3.fullName()); + } } diff --git a/build.gradle.kts b/build.gradle.kts index 154b4e7f776..4ebd09a9a2e 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -501,6 +501,9 @@ subprojects { exclude("test/**") } } + tasks.named("compileJava").configure { + dependsOn("spotlessCheck") + } } tasks.rat { diff --git 
a/bundles/aws-bundle/build.gradle.kts b/bundles/aws-bundle/build.gradle.kts index 35b1e22a4f6..a5765fb0641 100644 --- a/bundles/aws-bundle/build.gradle.kts +++ b/bundles/aws-bundle/build.gradle.kts @@ -39,6 +39,7 @@ tasks.withType(ShadowJar::class.java) { relocate("org.apache.commons.lang3", "org.apache.gravitino.aws.shaded.org.apache.commons.lang3") relocate("com.google.common", "org.apache.gravitino.aws.shaded.com.google.common") relocate("com.fasterxml.jackson", "org.apache.gravitino.aws.shaded.com.fasterxml.jackson") + mergeServiceFiles() } tasks.jar { diff --git a/bundles/azure-bundle/build.gradle.kts b/bundles/azure-bundle/build.gradle.kts index 7d9e253ac8a..fd57d33e105 100644 --- a/bundles/azure-bundle/build.gradle.kts +++ b/bundles/azure-bundle/build.gradle.kts @@ -42,6 +42,7 @@ tasks.withType(ShadowJar::class.java) { relocate("com.fasterxml", "org.apache.gravitino.azure.shaded.com.fasterxml") relocate("com.google.common", "org.apache.gravitino.azure.shaded.com.google.common") relocate("org.eclipse.jetty", "org.apache.gravitino.azure.shaded.org.eclipse.jetty") + mergeServiceFiles() } tasks.jar { diff --git a/bundles/gcp-bundle/build.gradle.kts b/bundles/gcp-bundle/build.gradle.kts index 73efaf9f22c..50300fafe05 100644 --- a/bundles/gcp-bundle/build.gradle.kts +++ b/bundles/gcp-bundle/build.gradle.kts @@ -42,6 +42,7 @@ tasks.withType(ShadowJar::class.java) { relocate("com.google.common", "org.apache.gravitino.gcp.shaded.com.google.common") relocate("com.fasterxml", "org.apache.gravitino.gcp.shaded.com.fasterxml") relocate("org.eclipse.jetty", "org.apache.gravitino.gcp.shaded.org.eclipse.jetty") + mergeServiceFiles() } tasks.jar { diff --git a/bundles/gcp/src/main/java/org/apache/gravitino/gcs/credential/GCSTokenProvider.java b/bundles/gcp/src/main/java/org/apache/gravitino/gcs/credential/GCSTokenProvider.java index 3f7d5bcfaa3..f499b8c3e85 100644 --- a/bundles/gcp/src/main/java/org/apache/gravitino/gcs/credential/GCSTokenProvider.java +++ 
b/bundles/gcp/src/main/java/org/apache/gravitino/gcs/credential/GCSTokenProvider.java @@ -146,6 +146,13 @@ private CredentialAccessBoundary getAccessBoundary( CredentialAccessBoundary.newBuilder(); readBuckets.forEach( bucket -> { + // Hadoop GCS connector needs to get bucket info + AccessBoundaryRule bucketInfoRule = + AccessBoundaryRule.newBuilder() + .setAvailableResource(toGCSBucketResource(bucket)) + .setAvailablePermissions(Arrays.asList("inRole:roles/storage.legacyBucketReader")) + .build(); + credentialAccessBoundaryBuilder.addRule(bucketInfoRule); List readConditions = readExpressions.get(bucket); AccessBoundaryRule rule = getAccessBoundaryRule( diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/ErrorMessages.java b/clients/cli/src/main/java/org/apache/gravitino/cli/ErrorMessages.java index 084b5c34c85..ecf1dbff4c7 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/ErrorMessages.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/ErrorMessages.java @@ -21,48 +21,74 @@ /* User friendly error messages. 
*/ public class ErrorMessages { - public static final String UNSUPPORTED_COMMAND = "Unsupported or unknown command."; - public static final String UNKNOWN_ENTITY = "Unknown entity."; - public static final String TOO_MANY_ARGUMENTS = "Too many arguments."; - public static final String UNKNOWN_METALAKE = "Unknown metalake name."; - public static final String UNKNOWN_CATALOG = "Unknown catalog name."; - public static final String UNKNOWN_SCHEMA = "Unknown schema name."; - public static final String UNKNOWN_TABLE = "Unknown table name."; - public static final String UNKNOWN_MODEL = "Unknown model name."; - public static final String MALFORMED_NAME = "Malformed entity name."; - public static final String MISSING_NAME = "Missing --name option."; - public static final String MISSING_METALAKE = "Missing --metalake option."; - public static final String MISSING_GROUP = "Missing --group option."; - public static final String MISSING_USER = "Missing --user option."; - public static final String MISSING_ROLE = "Missing --role option."; - public static final String MISSING_TAG = "Missing --tag option."; - public static final String METALAKE_EXISTS = "Metalake already exists."; public static final String CATALOG_EXISTS = "Catalog already exists."; - public static final String SCHEMA_EXISTS = "Schema already exists."; - public static final String UNKNOWN_USER = "Unknown user."; - public static final String USER_EXISTS = "User already exists."; - public static final String UNKNOWN_GROUP = "Unknown group."; - public static final String GROUP_EXISTS = "Group already exists."; - public static final String UNKNOWN_TAG = "Unknown tag."; - public static final String MULTIPLE_TAG_COMMAND_ERROR = - "Error: The current command only supports one --tag option."; - public static final String TAG_EXISTS = "Tag already exists."; - public static final String UNKNOWN_COLUMN = "Unknown column."; public static final String COLUMN_EXISTS = "Column already exists."; - public static final String 
UNKNOWN_TOPIC = "Unknown topic."; - public static final String TOPIC_EXISTS = "Topic already exists."; - public static final String UNKNOWN_FILESET = "Unknown fileset."; public static final String FILESET_EXISTS = "Fileset already exists."; - public static final String TAG_EMPTY = "Error: Must configure --tag option."; - public static final String UNKNOWN_ROLE = "Unknown role."; + public static final String GROUP_EXISTS = "Group already exists."; + public static final String METALAKE_EXISTS = "Metalake already exists."; + public static final String MODEL_EXISTS = "Model already exists."; public static final String ROLE_EXISTS = "Role already exists."; + public static final String SCHEMA_EXISTS = "Schema already exists."; public static final String TABLE_EXISTS = "Table already exists."; - public static final String MODEL_EXISTS = "Model already exists."; - public static final String INVALID_SET_COMMAND = - "Unsupported combination of options either use --name, --user, --group or --property and --value."; - public static final String INVALID_REMOVE_COMMAND = - "Unsupported combination of options either use --name or --property."; + public static final String TAG_EXISTS = "Tag already exists."; + public static final String TOPIC_EXISTS = "Topic already exists."; + public static final String USER_EXISTS = "User already exists."; + + public static final String ENTITY_IN_USE = " in use, please disable it first."; + + public static final String INVALID_ENABLE_DISABLE = + "Unable to us --enable and --disable at the same time"; public static final String INVALID_OWNER_COMMAND = "Unsupported combination of options either use --user or --group."; + public static final String INVALID_REMOVE_COMMAND = + "Unsupported combination of options either use --name or --property."; + public static final String INVALID_SET_COMMAND = + "Unsupported combination of options either use --name, --user, --group or --property and --value."; + + public static final String HELP_FAILED = "Failed 
to load help message: "; + + public static final String MALFORMED_NAME = "Malformed entity name."; + public static final String MISSING_COLUMN_FILE = "Missing --columnfile option."; + public static final String MISSING_DATATYPE = "Missing --datatype option."; + public static final String MISSING_ENTITIES = "Missing required entity names: "; + + public static final String MISSING_GROUP = "Missing --group option."; + public static final String MISSING_METALAKE = "Missing --metalake option."; + public static final String MISSING_NAME = "Missing --name option."; + public static final String MISSING_PRIVILEGES = "Missing --privilege option."; + public static final String MISSING_PROPERTY = "Missing --property option."; + public static final String MISSING_PROPERTY_AND_VALUE = "Missing --property and --value options."; + public static final String MISSING_ROLE = "Missing --role option."; + public static final String MISSING_TAG = "Missing --tag option."; + public static final String MISSING_URI = "Missing --uri option."; + public static final String MISSING_USER = "Missing --user option."; + public static final String MISSING_VALUE = "Missing --value option."; + + public static final String MULTIPLE_ROLE_COMMAND_ERROR = + "This command only supports one --role option."; + public static final String MULTIPLE_TAG_COMMAND_ERROR = + "This command only supports one --tag option."; + public static final String MISSING_PROVIDER = "Missing --provider option."; + + public static final String REGISTER_FAILED = "Failed to register model: "; + + public static final String UNKNOWN_CATALOG = "Unknown catalog name."; + public static final String UNKNOWN_COLUMN = "Unknown column name."; + public static final String UNKNOWN_ENTITY = "Unknown entity."; + public static final String UNKNOWN_FILESET = "Unknown fileset name."; + public static final String UNKNOWN_GROUP = "Unknown group."; + public static final String UNKNOWN_METALAKE = "Unknown metalake name."; + public static final String 
UNKNOWN_MODEL = "Unknown model name."; + public static final String UNKNOWN_PRIVILEGE = "Unknown privilege"; + public static final String UNKNOWN_ROLE = "Unknown role."; + public static final String UNKNOWN_SCHEMA = "Unknown schema name."; + public static final String UNKNOWN_TABLE = "Unknown table name."; + public static final String UNKNOWN_TAG = "Unknown tag."; + public static final String UNKNOWN_TOPIC = "Unknown topic name."; + public static final String UNKNOWN_USER = "Unknown user."; + + public static final String PARSE_ERROR = "Error parsing command line: "; + public static final String TOO_MANY_ARGUMENTS = "Too many arguments."; public static final String UNSUPPORTED_ACTION = "Entity doesn't support this action."; + public static final String UNSUPPORTED_COMMAND = "Unsupported or unknown command."; } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/FullName.java b/clients/cli/src/main/java/org/apache/gravitino/cli/FullName.java index a3b206dfdd1..7a9481cb95b 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/FullName.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/FullName.java @@ -74,6 +74,7 @@ public String getMetalakeName() { } System.err.println(ErrorMessages.MISSING_METALAKE); + Main.exit(-1); return null; } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java index c23fb8b7cd0..442ec2d1c33 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoCommandLine.java @@ -20,7 +20,6 @@ package org.apache.gravitino.cli; import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import java.io.BufferedReader; import java.io.IOException; @@ -30,7 +29,6 @@ import java.util.Arrays; import java.util.List; import java.util.Map; -import 
java.util.Objects; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; @@ -140,8 +138,6 @@ private void executeCommand() { handleCatalogCommand(); } else if (entity.equals(CommandEntities.METALAKE)) { handleMetalakeCommand(); - } else if (entity.equals(CommandEntities.MODEL)) { - handleModelCommand(); } else if (entity.equals(CommandEntities.TOPIC)) { handleTopicCommand(); } else if (entity.equals(CommandEntities.FILESET)) { @@ -167,75 +163,74 @@ private void handleMetalakeCommand() { String auth = getAuth(); String userName = line.getOptionValue(GravitinoOptions.LOGIN); FullName name = new FullName(line); - String metalake = name.getMetalakeName(); String outputFormat = line.getOptionValue(GravitinoOptions.OUTPUT); Command.setAuthenticationMode(auth, userName); + if (CommandActions.LIST.equals(command)) { + newListMetalakes(url, ignore, outputFormat).validate().handle(); + return; + } + + String metalake = name.getMetalakeName(); + switch (command) { case CommandActions.DETAILS: if (line.hasOption(GravitinoOptions.AUDIT)) { - newMetalakeAudit(url, ignore, metalake).handle(); + newMetalakeAudit(url, ignore, metalake).validate().handle(); } else { - newMetalakeDetails(url, ignore, outputFormat, metalake).handle(); + newMetalakeDetails(url, ignore, outputFormat, metalake).validate().handle(); } break; - case CommandActions.LIST: - newListMetalakes(url, ignore, outputFormat).handle(); - break; - case CommandActions.CREATE: - if (Objects.isNull(metalake)) { - System.err.println(CommandEntities.METALAKE + " is not defined"); - Main.exit(-1); - } String comment = line.getOptionValue(GravitinoOptions.COMMENT); - newCreateMetalake(url, ignore, metalake, comment).handle(); + newCreateMetalake(url, ignore, metalake, comment).validate().handle(); break; case CommandActions.DELETE: boolean force = line.hasOption(GravitinoOptions.FORCE); - newDeleteMetalake(url, ignore, force, metalake).handle(); + 
newDeleteMetalake(url, ignore, force, metalake).validate().handle(); break; case CommandActions.SET: String property = line.getOptionValue(GravitinoOptions.PROPERTY); String value = line.getOptionValue(GravitinoOptions.VALUE); - newSetMetalakeProperty(url, ignore, metalake, property, value).handle(); + newSetMetalakeProperty(url, ignore, metalake, property, value).validate().handle(); break; case CommandActions.REMOVE: property = line.getOptionValue(GravitinoOptions.PROPERTY); - newRemoveMetalakeProperty(url, ignore, metalake, property).handle(); + newRemoveMetalakeProperty(url, ignore, metalake, property).validate().handle(); break; case CommandActions.PROPERTIES: - newListMetalakeProperties(url, ignore, metalake).handle(); + newListMetalakeProperties(url, ignore, metalake).validate().handle(); break; case CommandActions.UPDATE: if (line.hasOption(GravitinoOptions.ENABLE) && line.hasOption(GravitinoOptions.DISABLE)) { - System.err.println("Unable to enable and disable at the same time"); + System.err.println(ErrorMessages.INVALID_ENABLE_DISABLE); Main.exit(-1); } if (line.hasOption(GravitinoOptions.ENABLE)) { boolean enableAllCatalogs = line.hasOption(GravitinoOptions.ALL); - newMetalakeEnable(url, ignore, metalake, enableAllCatalogs).handle(); + newMetalakeEnable(url, ignore, metalake, enableAllCatalogs).validate().handle(); } if (line.hasOption(GravitinoOptions.DISABLE)) { - newMetalakeDisable(url, ignore, metalake).handle(); + newMetalakeDisable(url, ignore, metalake).validate().handle(); } if (line.hasOption(GravitinoOptions.COMMENT)) { comment = line.getOptionValue(GravitinoOptions.COMMENT); - newUpdateMetalakeComment(url, ignore, metalake, comment).handle(); + newUpdateMetalakeComment(url, ignore, metalake, comment).validate().handle(); } if (line.hasOption(GravitinoOptions.RENAME)) { String newName = line.getOptionValue(GravitinoOptions.RENAME); force = line.hasOption(GravitinoOptions.FORCE); - newUpdateMetalakeName(url, ignore, force, metalake, 
newName).handle(); + newUpdateMetalakeName(url, ignore, force, metalake, newName).validate().handle(); } + break; default: @@ -261,7 +256,7 @@ private void handleCatalogCommand() { // Handle the CommandActions.LIST action separately as it doesn't use `catalog` if (CommandActions.LIST.equals(command)) { - newListCatalogs(url, ignore, outputFormat, metalake).handle(); + newListCatalogs(url, ignore, outputFormat, metalake).validate().handle(); return; } @@ -272,9 +267,9 @@ private void handleCatalogCommand() { switch (command) { case CommandActions.DETAILS: if (line.hasOption(GravitinoOptions.AUDIT)) { - newCatalogAudit(url, ignore, metalake, catalog).handle(); + newCatalogAudit(url, ignore, metalake, catalog).validate().handle(); } else { - newCatalogDetails(url, ignore, outputFormat, metalake, catalog).handle(); + newCatalogDetails(url, ignore, outputFormat, metalake, catalog).validate().handle(); } break; @@ -283,49 +278,53 @@ private void handleCatalogCommand() { String provider = line.getOptionValue(GravitinoOptions.PROVIDER); String[] properties = line.getOptionValues(CommandActions.PROPERTIES); Map propertyMap = new Properties().parse(properties); - newCreateCatalog(url, ignore, metalake, catalog, provider, comment, propertyMap).handle(); + newCreateCatalog(url, ignore, metalake, catalog, provider, comment, propertyMap) + .validate() + .handle(); break; case CommandActions.DELETE: boolean force = line.hasOption(GravitinoOptions.FORCE); - newDeleteCatalog(url, ignore, force, metalake, catalog).handle(); + newDeleteCatalog(url, ignore, force, metalake, catalog).validate().handle(); break; case CommandActions.SET: String property = line.getOptionValue(GravitinoOptions.PROPERTY); String value = line.getOptionValue(GravitinoOptions.VALUE); - newSetCatalogProperty(url, ignore, metalake, catalog, property, value).handle(); + newSetCatalogProperty(url, ignore, metalake, catalog, property, value).validate().handle(); break; case CommandActions.REMOVE: property = 
line.getOptionValue(GravitinoOptions.PROPERTY); - newRemoveCatalogProperty(url, ignore, metalake, catalog, property).handle(); + newRemoveCatalogProperty(url, ignore, metalake, catalog, property).validate().handle(); break; case CommandActions.PROPERTIES: - newListCatalogProperties(url, ignore, metalake, catalog).handle(); + newListCatalogProperties(url, ignore, metalake, catalog).validate().handle(); break; case CommandActions.UPDATE: if (line.hasOption(GravitinoOptions.ENABLE) && line.hasOption(GravitinoOptions.DISABLE)) { - System.err.println("Unable to enable and disable at the same time"); + System.err.println(ErrorMessages.INVALID_ENABLE_DISABLE); Main.exit(-1); } if (line.hasOption(GravitinoOptions.ENABLE)) { boolean enableMetalake = line.hasOption(GravitinoOptions.ALL); - newCatalogEnable(url, ignore, metalake, catalog, enableMetalake).handle(); + newCatalogEnable(url, ignore, metalake, catalog, enableMetalake).validate().handle(); } if (line.hasOption(GravitinoOptions.DISABLE)) { - newCatalogDisable(url, ignore, metalake, catalog).handle(); + newCatalogDisable(url, ignore, metalake, catalog).validate().handle(); } if (line.hasOption(GravitinoOptions.COMMENT)) { String updateComment = line.getOptionValue(GravitinoOptions.COMMENT); - newUpdateCatalogComment(url, ignore, metalake, catalog, updateComment).handle(); + newUpdateCatalogComment(url, ignore, metalake, catalog, updateComment) + .validate() + .handle(); } if (line.hasOption(GravitinoOptions.RENAME)) { String newName = line.getOptionValue(GravitinoOptions.RENAME); - newUpdateCatalogName(url, ignore, metalake, catalog, newName).handle(); + newUpdateCatalogName(url, ignore, metalake, catalog, newName).validate().handle(); } break; @@ -356,7 +355,7 @@ private void handleSchemaCommand() { // Handle the CommandActions.LIST action separately as it doesn't use `schema` if (CommandActions.LIST.equals(command)) { checkEntities(missingEntities); - newListSchema(url, ignore, metalake, catalog).handle(); + 
newListSchema(url, ignore, metalake, catalog).validate().handle(); return; } @@ -367,35 +366,39 @@ private void handleSchemaCommand() { switch (command) { case CommandActions.DETAILS: if (line.hasOption(GravitinoOptions.AUDIT)) { - newSchemaAudit(url, ignore, metalake, catalog, schema).handle(); + newSchemaAudit(url, ignore, metalake, catalog, schema).validate().handle(); } else { - newSchemaDetails(url, ignore, metalake, catalog, schema).handle(); + newSchemaDetails(url, ignore, metalake, catalog, schema).validate().handle(); } break; case CommandActions.CREATE: String comment = line.getOptionValue(GravitinoOptions.COMMENT); - newCreateSchema(url, ignore, metalake, catalog, schema, comment).handle(); + newCreateSchema(url, ignore, metalake, catalog, schema, comment).validate().handle(); break; case CommandActions.DELETE: boolean force = line.hasOption(GravitinoOptions.FORCE); - newDeleteSchema(url, ignore, force, metalake, catalog, schema).handle(); + newDeleteSchema(url, ignore, force, metalake, catalog, schema).validate().handle(); break; case CommandActions.SET: String property = line.getOptionValue(GravitinoOptions.PROPERTY); String value = line.getOptionValue(GravitinoOptions.VALUE); - newSetSchemaProperty(url, ignore, metalake, catalog, schema, property, value).handle(); + newSetSchemaProperty(url, ignore, metalake, catalog, schema, property, value) + .validate() + .handle(); break; case CommandActions.REMOVE: property = line.getOptionValue(GravitinoOptions.PROPERTY); - newRemoveSchemaProperty(url, ignore, metalake, catalog, schema, property).handle(); + newRemoveSchemaProperty(url, ignore, metalake, catalog, schema, property) + .validate() + .handle(); break; case CommandActions.PROPERTIES: - newListSchemaProperties(url, ignore, metalake, catalog, schema).handle(); + newListSchemaProperties(url, ignore, metalake, catalog, schema).validate().handle(); break; default: @@ -425,7 +428,7 @@ private void handleTableCommand() { // Handle CommandActions.LIST action 
separately as it doesn't require the `table` if (CommandActions.LIST.equals(command)) { checkEntities(missingEntities); - newListTables(url, ignore, metalake, catalog, schema).handle(); + newListTables(url, ignore, metalake, catalog, schema).validate().handle(); return; } @@ -436,17 +439,17 @@ private void handleTableCommand() { switch (command) { case CommandActions.DETAILS: if (line.hasOption(GravitinoOptions.AUDIT)) { - newTableAudit(url, ignore, metalake, catalog, schema, table).handle(); + newTableAudit(url, ignore, metalake, catalog, schema, table).validate().handle(); } else if (line.hasOption(GravitinoOptions.INDEX)) { - newListIndexes(url, ignore, metalake, catalog, schema, table).handle(); + newListIndexes(url, ignore, metalake, catalog, schema, table).validate().handle(); } else if (line.hasOption(GravitinoOptions.DISTRIBUTION)) { - newTableDistribution(url, ignore, metalake, catalog, schema, table).handle(); + newTableDistribution(url, ignore, metalake, catalog, schema, table).validate().handle(); } else if (line.hasOption(GravitinoOptions.PARTITION)) { - newTablePartition(url, ignore, metalake, catalog, schema, table).handle(); + newTablePartition(url, ignore, metalake, catalog, schema, table).validate().handle(); } else if (line.hasOption(GravitinoOptions.SORTORDER)) { - newTableSortOrder(url, ignore, metalake, catalog, schema, table).handle(); + newTableSortOrder(url, ignore, metalake, catalog, schema, table).validate().handle(); } else { - newTableDetails(url, ignore, metalake, catalog, schema, table).handle(); + newTableDetails(url, ignore, metalake, catalog, schema, table).validate().handle(); } break; @@ -455,39 +458,47 @@ private void handleTableCommand() { String columnFile = line.getOptionValue(GravitinoOptions.COLUMNFILE); String comment = line.getOptionValue(GravitinoOptions.COMMENT); newCreateTable(url, ignore, metalake, catalog, schema, table, columnFile, comment) + .validate() .handle(); break; } case CommandActions.DELETE: boolean force 
= line.hasOption(GravitinoOptions.FORCE); - newDeleteTable(url, ignore, force, metalake, catalog, schema, table).handle(); + newDeleteTable(url, ignore, force, metalake, catalog, schema, table).validate().handle(); break; case CommandActions.SET: String property = line.getOptionValue(GravitinoOptions.PROPERTY); String value = line.getOptionValue(GravitinoOptions.VALUE); newSetTableProperty(url, ignore, metalake, catalog, schema, table, property, value) + .validate() .handle(); break; case CommandActions.REMOVE: property = line.getOptionValue(GravitinoOptions.PROPERTY); - newRemoveTableProperty(url, ignore, metalake, catalog, schema, table, property).handle(); + newRemoveTableProperty(url, ignore, metalake, catalog, schema, table, property) + .validate() + .handle(); break; case CommandActions.PROPERTIES: - newListTableProperties(url, ignore, metalake, catalog, schema, table).handle(); + newListTableProperties(url, ignore, metalake, catalog, schema, table).validate().handle(); break; case CommandActions.UPDATE: { if (line.hasOption(GravitinoOptions.COMMENT)) { String comment = line.getOptionValue(GravitinoOptions.COMMENT); - newUpdateTableComment(url, ignore, metalake, catalog, schema, table, comment).handle(); + newUpdateTableComment(url, ignore, metalake, catalog, schema, table, comment) + .validate() + .handle(); } if (line.hasOption(GravitinoOptions.RENAME)) { String newName = line.getOptionValue(GravitinoOptions.RENAME); - newUpdateTableName(url, ignore, metalake, catalog, schema, table, newName).handle(); + newUpdateTableName(url, ignore, metalake, catalog, schema, table, newName) + .validate() + .handle(); } break; } @@ -517,29 +528,29 @@ protected void handleUserCommand() { switch (command) { case CommandActions.DETAILS: if (line.hasOption(GravitinoOptions.AUDIT)) { - newUserAudit(url, ignore, metalake, user).handle(); + newUserAudit(url, ignore, metalake, user).validate().handle(); } else { - newUserDetails(url, ignore, metalake, user).handle(); + 
newUserDetails(url, ignore, metalake, user).validate().handle(); } break; case CommandActions.LIST: - newListUsers(url, ignore, metalake).handle(); + newListUsers(url, ignore, metalake).validate().handle(); break; case CommandActions.CREATE: - newCreateUser(url, ignore, metalake, user).handle(); + newCreateUser(url, ignore, metalake, user).validate().handle(); break; case CommandActions.DELETE: boolean force = line.hasOption(GravitinoOptions.FORCE); - newDeleteUser(url, ignore, force, metalake, user).handle(); + newDeleteUser(url, ignore, force, metalake, user).validate().handle(); break; case CommandActions.REVOKE: String[] revokeRoles = line.getOptionValues(GravitinoOptions.ROLE); for (String role : revokeRoles) { - newRemoveRoleFromUser(url, ignore, metalake, user, role).handle(); + newRemoveRoleFromUser(url, ignore, metalake, user, role).validate().handle(); } System.out.printf("Remove roles %s from user %s%n", COMMA_JOINER.join(revokeRoles), user); break; @@ -547,7 +558,7 @@ protected void handleUserCommand() { case CommandActions.GRANT: String[] grantRoles = line.getOptionValues(GravitinoOptions.ROLE); for (String role : grantRoles) { - newAddRoleToUser(url, ignore, metalake, user, role).handle(); + newAddRoleToUser(url, ignore, metalake, user, role).validate().handle(); } System.out.printf("Grant roles %s to user %s%n", COMMA_JOINER.join(grantRoles), user); break; @@ -578,29 +589,29 @@ protected void handleGroupCommand() { switch (command) { case CommandActions.DETAILS: if (line.hasOption(GravitinoOptions.AUDIT)) { - newGroupAudit(url, ignore, metalake, group).handle(); + newGroupAudit(url, ignore, metalake, group).validate().handle(); } else { - newGroupDetails(url, ignore, metalake, group).handle(); + newGroupDetails(url, ignore, metalake, group).validate().handle(); } break; case CommandActions.LIST: - newListGroups(url, ignore, metalake).handle(); + newListGroups(url, ignore, metalake).validate().handle(); break; case CommandActions.CREATE: - 
newCreateGroup(url, ignore, metalake, group).handle(); + newCreateGroup(url, ignore, metalake, group).validate().handle(); break; case CommandActions.DELETE: boolean force = line.hasOption(GravitinoOptions.FORCE); - newDeleteGroup(url, ignore, force, metalake, group).handle(); + newDeleteGroup(url, ignore, force, metalake, group).validate().handle(); break; case CommandActions.REVOKE: String[] revokeRoles = line.getOptionValues(GravitinoOptions.ROLE); for (String role : revokeRoles) { - newRemoveRoleFromGroup(url, ignore, metalake, group, role).handle(); + newRemoveRoleFromGroup(url, ignore, metalake, group, role).validate().handle(); } System.out.printf("Remove roles %s from group %s%n", COMMA_JOINER.join(revokeRoles), group); break; @@ -608,7 +619,7 @@ protected void handleGroupCommand() { case CommandActions.GRANT: String[] grantRoles = line.getOptionValues(GravitinoOptions.ROLE); for (String role : grantRoles) { - newAddRoleToGroup(url, ignore, metalake, group, role).handle(); + newAddRoleToGroup(url, ignore, metalake, group, role).validate().handle(); } System.out.printf("Grant roles %s to group %s%n", COMMA_JOINER.join(grantRoles), group); break; @@ -631,12 +642,6 @@ protected void handleTagCommand() { Command.setAuthenticationMode(auth, userName); String[] tags = line.getOptionValues(GravitinoOptions.TAG); - if (tags == null - && !((CommandActions.REMOVE.equals(command) && line.hasOption(GravitinoOptions.FORCE)) - || CommandActions.LIST.equals(command))) { - System.err.println(ErrorMessages.MISSING_TAG); - Main.exit(-1); - } if (tags != null) { tags = Arrays.stream(tags).distinct().toArray(String[]::new); @@ -644,41 +649,36 @@ protected void handleTagCommand() { switch (command) { case CommandActions.DETAILS: - newTagDetails(url, ignore, metalake, getOneTag(tags)).handle(); + newTagDetails(url, ignore, metalake, getOneTag(tags)).validate().handle(); break; case CommandActions.LIST: if (!name.hasCatalogName()) { - newListTags(url, ignore, metalake).handle(); 
+ newListTags(url, ignore, metalake).validate().handle(); } else { - newListEntityTags(url, ignore, metalake, name).handle(); + newListEntityTags(url, ignore, metalake, name).validate().handle(); } break; case CommandActions.CREATE: String comment = line.getOptionValue(GravitinoOptions.COMMENT); - newCreateTags(url, ignore, metalake, tags, comment).handle(); + newCreateTags(url, ignore, metalake, tags, comment).validate().handle(); break; case CommandActions.DELETE: boolean forceDelete = line.hasOption(GravitinoOptions.FORCE); - newDeleteTag(url, ignore, forceDelete, metalake, tags).handle(); + newDeleteTag(url, ignore, forceDelete, metalake, tags).validate().handle(); break; case CommandActions.SET: String propertySet = line.getOptionValue(GravitinoOptions.PROPERTY); String valueSet = line.getOptionValue(GravitinoOptions.VALUE); - if (propertySet != null && valueSet != null) { - newSetTagProperty(url, ignore, metalake, getOneTag(tags), propertySet, valueSet).handle(); - } else if (propertySet == null && valueSet == null) { - if (!name.hasName()) { - System.err.println(ErrorMessages.MISSING_NAME); - Main.exit(-1); - } - newTagEntity(url, ignore, metalake, name, tags).handle(); + if (propertySet == null && valueSet == null) { + newTagEntity(url, ignore, metalake, name, tags).validate().handle(); } else { - System.err.println("The set command only supports tag properties or attaching tags."); - Main.exit(-1); + newSetTagProperty(url, ignore, metalake, getOneTag(tags), propertySet, valueSet) + .validate() + .handle(); } break; @@ -686,33 +686,33 @@ protected void handleTagCommand() { boolean isTag = line.hasOption(GravitinoOptions.TAG); if (!isTag) { boolean forceRemove = line.hasOption(GravitinoOptions.FORCE); - newRemoveAllTags(url, ignore, metalake, name, forceRemove).handle(); + newRemoveAllTags(url, ignore, metalake, name, forceRemove).validate().handle(); } else { String propertyRemove = line.getOptionValue(GravitinoOptions.PROPERTY); if (propertyRemove != null) 
{ - newRemoveTagProperty(url, ignore, metalake, getOneTag(tags), propertyRemove).handle(); + newRemoveTagProperty(url, ignore, metalake, getOneTag(tags), propertyRemove) + .validate() + .handle(); } else { - if (!name.hasName()) { - System.err.println(ErrorMessages.MISSING_NAME); - Main.exit(-1); - } - newUntagEntity(url, ignore, metalake, name, tags).handle(); + newUntagEntity(url, ignore, metalake, name, tags).validate().handle(); } } break; case CommandActions.PROPERTIES: - newListTagProperties(url, ignore, metalake, getOneTag(tags)).handle(); + newListTagProperties(url, ignore, metalake, getOneTag(tags)).validate().handle(); break; case CommandActions.UPDATE: if (line.hasOption(GravitinoOptions.COMMENT)) { String updateComment = line.getOptionValue(GravitinoOptions.COMMENT); - newUpdateTagComment(url, ignore, metalake, getOneTag(tags), updateComment).handle(); + newUpdateTagComment(url, ignore, metalake, getOneTag(tags), updateComment) + .validate() + .handle(); } if (line.hasOption(GravitinoOptions.RENAME)) { String newName = line.getOptionValue(GravitinoOptions.RENAME); - newUpdateTagName(url, ignore, metalake, getOneTag(tags), newName).handle(); + newUpdateTagName(url, ignore, metalake, getOneTag(tags), newName).validate().handle(); } break; @@ -724,7 +724,10 @@ protected void handleTagCommand() { } private String getOneTag(String[] tags) { - Preconditions.checkArgument(tags.length <= 1, ErrorMessages.MULTIPLE_TAG_COMMAND_ERROR); + if (tags == null || tags.length > 1) { + System.err.println(ErrorMessages.MULTIPLE_TAG_COMMAND_ERROR); + Main.exit(-1); + } return tags[0]; } @@ -752,34 +755,34 @@ protected void handleRoleCommand() { switch (command) { case CommandActions.DETAILS: if (line.hasOption(GravitinoOptions.AUDIT)) { - newRoleAudit(url, ignore, metalake, getOneRole(roles, CommandActions.DETAILS)).handle(); + newRoleAudit(url, ignore, metalake, getOneRole(roles)).validate().handle(); } else { - newRoleDetails(url, ignore, metalake, getOneRole(roles, 
CommandActions.DETAILS)).handle(); + newRoleDetails(url, ignore, metalake, getOneRole(roles)).validate().handle(); } break; case CommandActions.LIST: - newListRoles(url, ignore, metalake).handle(); + newListRoles(url, ignore, metalake).validate().handle(); break; case CommandActions.CREATE: - newCreateRole(url, ignore, metalake, roles).handle(); + newCreateRole(url, ignore, metalake, roles).validate().handle(); break; case CommandActions.DELETE: boolean forceDelete = line.hasOption(GravitinoOptions.FORCE); - newDeleteRole(url, ignore, forceDelete, metalake, roles).handle(); + newDeleteRole(url, ignore, forceDelete, metalake, roles).validate().handle(); break; case CommandActions.GRANT: - newGrantPrivilegesToRole( - url, ignore, metalake, getOneRole(roles, CommandActions.GRANT), name, privileges) + newGrantPrivilegesToRole(url, ignore, metalake, getOneRole(roles), name, privileges) + .validate() .handle(); break; case CommandActions.REVOKE: - newRevokePrivilegesFromRole( - url, ignore, metalake, getOneRole(roles, CommandActions.REMOVE), name, privileges) + newRevokePrivilegesFromRole(url, ignore, metalake, getOneRole(roles), name, privileges) + .validate() .handle(); break; @@ -790,9 +793,12 @@ url, ignore, metalake, getOneRole(roles, CommandActions.REMOVE), name, privilege } } - private String getOneRole(String[] roles, String command) { - Preconditions.checkArgument( - roles.length == 1, command + " requires only one role, but multiple are currently passed."); + private String getOneRole(String[] roles) { + if (roles == null || roles.length != 1) { + System.err.println(ErrorMessages.MULTIPLE_ROLE_COMMAND_ERROR); + Main.exit(-1); + } + return roles[0]; } @@ -818,7 +824,7 @@ private void handleColumnCommand() { if (CommandActions.LIST.equals(command)) { checkEntities(missingEntities); - newListColumns(url, ignore, metalake, catalog, schema, table).handle(); + newListColumns(url, ignore, metalake, catalog, schema, table).validate().handle(); return; } @@ -829,7 
+835,7 @@ private void handleColumnCommand() { switch (command) { case CommandActions.DETAILS: if (line.hasOption(GravitinoOptions.AUDIT)) { - newColumnAudit(url, ignore, metalake, catalog, schema, table, column).handle(); + newColumnAudit(url, ignore, metalake, catalog, schema, table, column).validate().handle(); } else { System.err.println(ErrorMessages.UNSUPPORTED_ACTION); Main.exit(-1); @@ -863,12 +869,13 @@ private void handleColumnCommand() { nullable, autoIncrement, defaultValue) + .validate() .handle(); break; } case CommandActions.DELETE: - newDeleteColumn(url, ignore, metalake, catalog, schema, table, column).handle(); + newDeleteColumn(url, ignore, metalake, catalog, schema, table, column).validate().handle(); break; case CommandActions.UPDATE: @@ -876,34 +883,40 @@ private void handleColumnCommand() { if (line.hasOption(GravitinoOptions.COMMENT)) { String comment = line.getOptionValue(GravitinoOptions.COMMENT); newUpdateColumnComment(url, ignore, metalake, catalog, schema, table, column, comment) + .validate() .handle(); } if (line.hasOption(GravitinoOptions.RENAME)) { String newName = line.getOptionValue(GravitinoOptions.RENAME); newUpdateColumnName(url, ignore, metalake, catalog, schema, table, column, newName) + .validate() .handle(); } if (line.hasOption(GravitinoOptions.DATATYPE) && !line.hasOption(GravitinoOptions.DEFAULT)) { String datatype = line.getOptionValue(GravitinoOptions.DATATYPE); newUpdateColumnDatatype(url, ignore, metalake, catalog, schema, table, column, datatype) + .validate() .handle(); } if (line.hasOption(GravitinoOptions.POSITION)) { String position = line.getOptionValue(GravitinoOptions.POSITION); newUpdateColumnPosition(url, ignore, metalake, catalog, schema, table, column, position) + .validate() .handle(); } if (line.hasOption(GravitinoOptions.NULL)) { boolean nullable = line.getOptionValue(GravitinoOptions.NULL).equals("true"); newUpdateColumnNullability( url, ignore, metalake, catalog, schema, table, column, nullable) + 
.validate() .handle(); } if (line.hasOption(GravitinoOptions.AUTO)) { boolean autoIncrement = line.getOptionValue(GravitinoOptions.AUTO).equals("true"); newUpdateColumnAutoIncrement( url, ignore, metalake, catalog, schema, table, column, autoIncrement) + .validate() .handle(); } if (line.hasOption(GravitinoOptions.DEFAULT)) { @@ -911,6 +924,7 @@ private void handleColumnCommand() { String dataType = line.getOptionValue(GravitinoOptions.DATATYPE); newUpdateColumnDefault( url, ignore, metalake, catalog, schema, table, column, defaultValue, dataType) + .validate() .handle(); } break; @@ -936,7 +950,7 @@ private void handleHelpCommand() { } System.out.print(helpMessage.toString()); } catch (IOException e) { - System.err.println("Failed to load help message: " + e.getMessage()); + System.err.println(ErrorMessages.HELP_FAILED + e.getMessage()); Main.exit(-1); } } @@ -1000,7 +1014,7 @@ private void handleTopicCommand() { if (CommandActions.LIST.equals(command)) { checkEntities(missingEntities); - newListTopics(url, ignore, metalake, catalog, schema).handle(); + newListTopics(url, ignore, metalake, catalog, schema).validate().handle(); return; } @@ -1010,20 +1024,22 @@ private void handleTopicCommand() { switch (command) { case CommandActions.DETAILS: - newTopicDetails(url, ignore, metalake, catalog, schema, topic).handle(); + newTopicDetails(url, ignore, metalake, catalog, schema, topic).validate().handle(); break; case CommandActions.CREATE: { String comment = line.getOptionValue(GravitinoOptions.COMMENT); - newCreateTopic(url, ignore, metalake, catalog, schema, topic, comment).handle(); + newCreateTopic(url, ignore, metalake, catalog, schema, topic, comment) + .validate() + .handle(); break; } case CommandActions.DELETE: { boolean force = line.hasOption(GravitinoOptions.FORCE); - newDeleteTopic(url, ignore, force, metalake, catalog, schema, topic).handle(); + newDeleteTopic(url, ignore, force, metalake, catalog, schema, topic).validate().handle(); break; } @@ -1031,7 
+1047,9 @@ private void handleTopicCommand() { { if (line.hasOption(GravitinoOptions.COMMENT)) { String comment = line.getOptionValue(GravitinoOptions.COMMENT); - newUpdateTopicComment(url, ignore, metalake, catalog, schema, topic, comment).handle(); + newUpdateTopicComment(url, ignore, metalake, catalog, schema, topic, comment) + .validate() + .handle(); } break; } @@ -1041,6 +1059,7 @@ private void handleTopicCommand() { String property = line.getOptionValue(GravitinoOptions.PROPERTY); String value = line.getOptionValue(GravitinoOptions.VALUE); newSetTopicProperty(url, ignore, metalake, catalog, schema, topic, property, value) + .validate() .handle(); break; } @@ -1048,12 +1067,14 @@ private void handleTopicCommand() { case CommandActions.REMOVE: { String property = line.getOptionValue(GravitinoOptions.PROPERTY); - newRemoveTopicProperty(url, ignore, metalake, catalog, schema, topic, property).handle(); + newRemoveTopicProperty(url, ignore, metalake, catalog, schema, topic, property) + .validate() + .handle(); break; } case CommandActions.PROPERTIES: - newListTopicProperties(url, ignore, metalake, catalog, schema, topic).handle(); + newListTopicProperties(url, ignore, metalake, catalog, schema, topic).validate().handle(); break; default: @@ -1083,7 +1104,7 @@ private void handleFilesetCommand() { // Handle CommandActions.LIST action separately as it doesn't require the `fileset` if (CommandActions.LIST.equals(command)) { checkEntities(missingEntities); - newListFilesets(url, ignore, metalake, catalog, schema).handle(); + newListFilesets(url, ignore, metalake, catalog, schema).validate().handle(); return; } @@ -1093,7 +1114,7 @@ private void handleFilesetCommand() { switch (command) { case CommandActions.DETAILS: - newFilesetDetails(url, ignore, metalake, catalog, schema, fileset).handle(); + newFilesetDetails(url, ignore, metalake, catalog, schema, fileset).validate().handle(); break; case CommandActions.CREATE: @@ -1102,6 +1123,7 @@ private void 
handleFilesetCommand() { String[] properties = line.getOptionValues(CommandActions.PROPERTIES); Map propertyMap = new Properties().parse(properties); newCreateFileset(url, ignore, metalake, catalog, schema, fileset, comment, propertyMap) + .validate() .handle(); break; } @@ -1109,7 +1131,9 @@ private void handleFilesetCommand() { case CommandActions.DELETE: { boolean force = line.hasOption(GravitinoOptions.FORCE); - newDeleteFileset(url, ignore, force, metalake, catalog, schema, fileset).handle(); + newDeleteFileset(url, ignore, force, metalake, catalog, schema, fileset) + .validate() + .handle(); break; } @@ -1118,6 +1142,7 @@ private void handleFilesetCommand() { String property = line.getOptionValue(GravitinoOptions.PROPERTY); String value = line.getOptionValue(GravitinoOptions.VALUE); newSetFilesetProperty(url, ignore, metalake, catalog, schema, fileset, property, value) + .validate() .handle(); break; } @@ -1126,12 +1151,15 @@ private void handleFilesetCommand() { { String property = line.getOptionValue(GravitinoOptions.PROPERTY); newRemoveFilesetProperty(url, ignore, metalake, catalog, schema, fileset, property) + .validate() .handle(); break; } case CommandActions.PROPERTIES: - newListFilesetProperties(url, ignore, metalake, catalog, schema, fileset).handle(); + newListFilesetProperties(url, ignore, metalake, catalog, schema, fileset) + .validate() + .handle(); break; case CommandActions.UPDATE: @@ -1139,11 +1167,14 @@ private void handleFilesetCommand() { if (line.hasOption(GravitinoOptions.COMMENT)) { String comment = line.getOptionValue(GravitinoOptions.COMMENT); newUpdateFilesetComment(url, ignore, metalake, catalog, schema, fileset, comment) + .validate() .handle(); } if (line.hasOption(GravitinoOptions.RENAME)) { String newName = line.getOptionValue(GravitinoOptions.RENAME); - newUpdateFilesetName(url, ignore, metalake, catalog, schema, fileset, newName).handle(); + newUpdateFilesetName(url, ignore, metalake, catalog, schema, fileset, newName) + 
.validate() + .handle(); } break; } @@ -1175,7 +1206,7 @@ private void handleModelCommand() { // Handle CommandActions.LIST action separately as it doesn't require the `model` if (CommandActions.LIST.equals(command)) { checkEntities(missingEntities); - newListModel(url, ignore, metalake, catalog, schema).handle(); + newListModel(url, ignore, metalake, catalog, schema).validate().handle(); return; } @@ -1186,12 +1217,48 @@ private void handleModelCommand() { switch (command) { case CommandActions.DETAILS: if (line.hasOption(GravitinoOptions.AUDIT)) { - newModelAudit(url, ignore, metalake, catalog, schema, model).handle(); + newModelAudit(url, ignore, metalake, catalog, schema, model).validate().handle(); } else { - newModelDetails(url, ignore, metalake, catalog, schema, model).handle(); + newModelDetails(url, ignore, metalake, catalog, schema, model).validate().handle(); } break; + case CommandActions.DELETE: + boolean force = line.hasOption(GravitinoOptions.FORCE); + newDeleteModel(url, ignore, force, metalake, catalog, schema, model).validate().handle(); + break; + + case CommandActions.CREATE: + String createComment = line.getOptionValue(GravitinoOptions.COMMENT); + String[] createProperties = line.getOptionValues(GravitinoOptions.PROPERTIES); + Map createPropertyMap = new Properties().parse(createProperties); + newCreateModel( + url, ignore, metalake, catalog, schema, model, createComment, createPropertyMap) + .validate() + .handle(); + break; + + case CommandActions.UPDATE: + String[] alias = line.getOptionValues(GravitinoOptions.ALIAS); + String uri = line.getOptionValue(GravitinoOptions.URI); + String linkComment = line.getOptionValue(GravitinoOptions.COMMENT); + String[] linkProperties = line.getOptionValues(CommandActions.PROPERTIES); + Map linkPropertityMap = new Properties().parse(linkProperties); + newLinkModel( + url, + ignore, + metalake, + catalog, + schema, + model, + uri, + alias, + linkComment, + linkPropertityMap) + .validate() + .handle(); + 
break; + default: System.err.println(ErrorMessages.UNSUPPORTED_ACTION); break; @@ -1274,7 +1341,7 @@ public String getAuth() { private void checkEntities(List entities) { if (!entities.isEmpty()) { - System.err.println("Missing required argument(s): " + COMMA_JOINER.join(entities)); + System.err.println(ErrorMessages.MISSING_ENTITIES + COMMA_JOINER.join(entities)); Main.exit(-1); } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java index 657566036dc..aaeb8f0184f 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/GravitinoOptions.java @@ -62,6 +62,8 @@ public class GravitinoOptions { public static final String ALL = "all"; public static final String ENABLE = "enable"; public static final String DISABLE = "disable"; + public static final String ALIAS = "alias"; + public static final String URI = "uri"; /** * Builds and returns the CLI options for Gravitino. 
@@ -109,6 +111,10 @@ public Options options() { options.addOption(createArgOption(COLUMNFILE, "CSV file describing columns")); options.addOption(createSimpleOption(null, ALL, "all operation for --enable")); + // model options + options.addOption(createArgOption(null, URI, "model version artifact")); + options.addOption(createArgsOption(null, ALIAS, "model aliases")); + // Options that support multiple values options.addOption(createArgsOption("p", PROPERTIES, "property name/value pairs")); options.addOption(createArgsOption("t", TAG, "tag name")); diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/Main.java b/clients/cli/src/main/java/org/apache/gravitino/cli/Main.java index 1f4a3926ef5..8c28d7e8a29 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/Main.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/Main.java @@ -56,7 +56,7 @@ public static void main(String[] args) { commandLine.handleSimpleLine(); } } catch (ParseException exp) { - System.err.println("Error parsing command line: " + exp.getMessage()); + System.err.println(ErrorMessages.PARSE_ERROR + exp.getMessage()); GravitinoCommandLine.displayHelp(options); exit(-1); } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/ParseType.java b/clients/cli/src/main/java/org/apache/gravitino/cli/ParseType.java index e797d0552ad..9442175ef80 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/ParseType.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/ParseType.java @@ -22,6 +22,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.gravitino.rel.types.Type; +import org.apache.gravitino.rel.types.Types; public class ParseType { @@ -36,7 +37,7 @@ public class ParseType { * @return a {@link org.apache.gravitino.cli.ParsedType} object representing the parsed type name. 
* @throws IllegalArgumentException if the data type format is unsupported or malformed */ - public static ParsedType parse(String datatype) { + public static ParsedType parseBasicType(String datatype) { Pattern pattern = Pattern.compile("^(\\w+)\\((\\d+)(?:,(\\d+))?\\)$"); Matcher matcher = pattern.matcher(datatype); @@ -57,8 +58,8 @@ public static ParsedType parse(String datatype) { return null; } - public static Type toType(String datatype) { - ParsedType parsed = parse(datatype); + private static Type toBasicType(String datatype) { + ParsedType parsed = parseBasicType(datatype); if (parsed != null) { if (parsed.getPrecision() != null && parsed.getScale() != null) { @@ -70,4 +71,36 @@ public static Type toType(String datatype) { return TypeConverter.convert(datatype); } + + private static Type toListType(String datatype) { + Pattern pattern = Pattern.compile("^list\\((.+)\\)$"); + Matcher matcher = pattern.matcher(datatype); + if (matcher.matches()) { + Type elementType = toBasicType(matcher.group(1)); + return Types.ListType.of(elementType, false); + } + throw new IllegalArgumentException("Malformed list type: " + datatype); + } + + private static Type toMapType(String datatype) { + Pattern pattern = Pattern.compile("^map\\((.+),(.+)\\)$"); + Matcher matcher = pattern.matcher(datatype); + if (matcher.matches()) { + Type keyType = toBasicType(matcher.group(1)); + Type valueType = toBasicType(matcher.group(2)); + return Types.MapType.of(keyType, valueType, false); + } + throw new IllegalArgumentException("Malformed map type: " + datatype); + } + + public static Type toType(String datatype) { + if (datatype.startsWith("list")) { + return toListType(datatype); + } else if (datatype.startsWith("map")) { + return toMapType(datatype); + } + + // fallback: if not complex type, parse as primitive type + return toBasicType(datatype); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/Privileges.java 
b/clients/cli/src/main/java/org/apache/gravitino/cli/Privileges.java index 9d47d8fc9c8..fa904663318 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/Privileges.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/Privileges.java @@ -112,7 +112,7 @@ public static Privilege.Name toName(String privilege) { case MANAGE_GRANTS: return Privilege.Name.MANAGE_GRANTS; default: - System.err.println("Unknown privilege"); + System.err.println(ErrorMessages.UNKNOWN_PRIVILEGE + " " + privilege); return null; } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/TestableCommandLine.java b/clients/cli/src/main/java/org/apache/gravitino/cli/TestableCommandLine.java index 6a468749178..c08a0950523 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/TestableCommandLine.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/TestableCommandLine.java @@ -45,6 +45,7 @@ import org.apache.gravitino.cli.commands.DeleteFileset; import org.apache.gravitino.cli.commands.DeleteGroup; import org.apache.gravitino.cli.commands.DeleteMetalake; +import org.apache.gravitino.cli.commands.DeleteModel; import org.apache.gravitino.cli.commands.DeleteRole; import org.apache.gravitino.cli.commands.DeleteSchema; import org.apache.gravitino.cli.commands.DeleteTable; @@ -55,6 +56,7 @@ import org.apache.gravitino.cli.commands.GrantPrivilegesToRole; import org.apache.gravitino.cli.commands.GroupAudit; import org.apache.gravitino.cli.commands.GroupDetails; +import org.apache.gravitino.cli.commands.LinkModel; import org.apache.gravitino.cli.commands.ListAllTags; import org.apache.gravitino.cli.commands.ListCatalogProperties; import org.apache.gravitino.cli.commands.ListCatalogs; @@ -83,6 +85,7 @@ import org.apache.gravitino.cli.commands.ModelAudit; import org.apache.gravitino.cli.commands.ModelDetails; import org.apache.gravitino.cli.commands.OwnerDetails; +import org.apache.gravitino.cli.commands.RegisterModel; import 
org.apache.gravitino.cli.commands.RemoveAllTags; import org.apache.gravitino.cli.commands.RemoveCatalogProperty; import org.apache.gravitino.cli.commands.RemoveFilesetProperty; @@ -925,4 +928,42 @@ protected ModelDetails newModelDetails( String url, boolean ignore, String metalake, String catalog, String schema, String model) { return new ModelDetails(url, ignore, metalake, catalog, schema, model); } + + protected RegisterModel newCreateModel( + String url, + boolean ignore, + String metalake, + String catalog, + String schema, + String model, + String comment, + Map properties) { + return new RegisterModel(url, ignore, metalake, catalog, schema, model, comment, properties); + } + + protected DeleteModel newDeleteModel( + String url, + boolean ignore, + boolean force, + String metalake, + String catalog, + String schema, + String model) { + return new DeleteModel(url, ignore, force, metalake, catalog, schema, model); + } + + protected LinkModel newLinkModel( + String url, + boolean ignore, + String metalake, + String catalog, + String schema, + String model, + String uri, + String[] alias, + String comment, + Map properties) { + return new LinkModel( + url, ignore, metalake, catalog, schema, model, uri, alias, comment, properties); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/Command.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/Command.java index f91dae40425..ea6abdd6393 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/Command.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/Command.java @@ -21,7 +21,9 @@ import static org.apache.gravitino.client.GravitinoClientBase.Builder; +import com.google.common.base.Joiner; import java.io.File; +import org.apache.gravitino.cli.ErrorMessages; import org.apache.gravitino.cli.GravitinoConfig; import org.apache.gravitino.cli.KerberosData; import org.apache.gravitino.cli.Main; @@ -39,6 +41,7 @@ public abstract class Command { 
public static final String OUTPUT_FORMAT_TABLE = "table"; public static final String OUTPUT_FORMAT_PLAIN = "plain"; + public static final Joiner COMMA_JOINER = Joiner.on(", ").skipNulls(); protected static String authentication = null; protected static String userName = null; @@ -46,7 +49,6 @@ public abstract class Command { private static final String SIMPLE_AUTH = "simple"; private static final String OAUTH_AUTH = "oauth"; private static final String KERBEROS_AUTH = "kerberos"; - private final String url; private final boolean ignoreVersions; private final String outputFormat; @@ -99,6 +101,37 @@ public static void setAuthenticationMode(String authentication, String userName) /** All commands have a handle method to handle and run the required command. */ public abstract void handle(); + + /** + * verify the arguments. All commands have a verify method to verify the arguments. + * + * @return Returns itself via argument validation, otherwise exits. + */ + public Command validate() { + return this; + } + + /** + * Validates that both property and value arguments are not null. + * + * @param property The property name to check + * @param value The value associated with the property + */ + protected void validatePropertyAndValue(String property, String value) { + if (property == null && value == null) exitWithError(ErrorMessages.MISSING_PROPERTY_AND_VALUE); + if (property == null) exitWithError(ErrorMessages.MISSING_PROPERTY); + if (value == null) exitWithError(ErrorMessages.MISSING_VALUE); + } + + /** + * Validates that the property argument is not null. + * + * @param property The property name to validate + */ + protected void validateProperty(String property) { + if (property == null) exitWithError(ErrorMessages.MISSING_PROPERTY); + } + /** * Builds a {@link GravitinoClient} instance with the provided server URL and metalake. 
* diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateCatalog.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateCatalog.java index e0c11c1e040..2870dd7103e 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateCatalog.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateCatalog.java @@ -81,4 +81,10 @@ public void handle() { System.out.println(catalog + " catalog created"); } + + @Override + public Command validate() { + if (provider == null) exitWithError(ErrorMessages.MISSING_PROVIDER); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateTable.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateTable.java index fefa6267221..aa409941e59 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateTable.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateTable.java @@ -108,4 +108,10 @@ public void handle() { System.out.println(table + " created"); } + + @Override + public Command validate() { + if (columnFile == null) exitWithError(ErrorMessages.MISSING_COLUMN_FILE); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateTag.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateTag.java index 0dd4289bb75..dabf34c8b1b 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateTag.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/CreateTag.java @@ -53,7 +53,7 @@ public CreateTag( @Override public void handle() { if (tags == null || tags.length == 0) { - System.err.println(ErrorMessages.TAG_EMPTY); + System.err.println(ErrorMessages.MISSING_TAG); } else { boolean hasOnlyOneTag = tags.length == 1; if (hasOnlyOneTag) { @@ -103,4 +103,10 @@ private void handleMultipleTags() { System.out.println("Tags " + String.join(",", remaining) + " 
not created"); } } + + @Override + public Command validate() { + if (tags == null) exitWithError(ErrorMessages.MISSING_TAG); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteCatalog.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteCatalog.java index 6aa8e5ad904..7cb9bf7d9c8 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteCatalog.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteCatalog.java @@ -66,7 +66,7 @@ public void handle() { } catch (NoSuchCatalogException err) { exitWithError(ErrorMessages.UNKNOWN_CATALOG); } catch (CatalogInUseException catalogInUseException) { - System.err.println(catalog + " in use, please disable it first."); + System.err.println(catalog + ErrorMessages.ENTITY_IN_USE); } catch (Exception exp) { exitWithError(exp.getMessage()); } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteMetalake.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteMetalake.java index e88ae41486f..3bad108a9ec 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteMetalake.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteMetalake.java @@ -58,7 +58,7 @@ public void handle() { } catch (NoSuchMetalakeException err) { exitWithError(ErrorMessages.UNKNOWN_METALAKE); } catch (MetalakeInUseException inUseException) { - System.err.println(metalake + " in use, please disable it first."); + System.err.println(metalake + ErrorMessages.ENTITY_IN_USE); } catch (Exception exp) { exitWithError(exp.getMessage()); } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteModel.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteModel.java new file mode 100644 index 00000000000..f44814ce68c --- /dev/null +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteModel.java @@ -0,0 +1,96 
@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.cli.commands; + +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.cli.AreYouSure; +import org.apache.gravitino.cli.ErrorMessages; +import org.apache.gravitino.client.GravitinoClient; +import org.apache.gravitino.exceptions.NoSuchCatalogException; +import org.apache.gravitino.exceptions.NoSuchMetalakeException; +import org.apache.gravitino.exceptions.NoSuchModelException; +import org.apache.gravitino.exceptions.NoSuchSchemaException; + +/** Deletes an existing model. */ +public class DeleteModel extends Command { + + protected final String metalake; + protected final String catalog; + protected final String schema; + protected final String model; + protected final boolean force; + + /** + * Deletes an existing model. + * + * @param url The URL of the Gravitino server. + * @param ignoreVersions If true don't check the client/server versions match. + * @param force Force operation. + * @param metalake The name of the metalake. + * @param catalog The name of the catalog. + * @param schema The name of the schema. + * @param model The name of the model. 
+ */ + public DeleteModel( + String url, + boolean ignoreVersions, + boolean force, + String metalake, + String catalog, + String schema, + String model) { + super(url, ignoreVersions); + this.force = force; + this.metalake = metalake; + this.catalog = catalog; + this.schema = schema; + this.model = model; + } + + /** Deletes an existing model. */ + public void handle() { + boolean deleted = false; + + if (!AreYouSure.really(force)) { + return; + } + + try (GravitinoClient client = buildClient(metalake)) { + NameIdentifier name = NameIdentifier.of(schema, model); + deleted = client.loadCatalog(catalog).asModelCatalog().deleteModel(name); + } catch (NoSuchMetalakeException noSuchMetalakeException) { + exitWithError(ErrorMessages.UNKNOWN_METALAKE); + } catch (NoSuchCatalogException noSuchCatalogException) { + exitWithError(ErrorMessages.UNKNOWN_CATALOG); + } catch (NoSuchSchemaException noSuchSchemaException) { + exitWithError(ErrorMessages.UNKNOWN_SCHEMA); + } catch (NoSuchModelException noSuchModelException) { + exitWithError(ErrorMessages.UNKNOWN_MODEL); + } catch (Exception err) { + exitWithError(err.getMessage()); + } + + if (deleted) { + System.out.println(model + " deleted."); + } else { + System.out.println(model + " not deleted."); + } + } +} diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteTag.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteTag.java index d3db384c094..26919e06acf 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteTag.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/DeleteTag.java @@ -59,7 +59,7 @@ public void handle() { } if (tags == null || tags.length == 0) { - System.err.println(ErrorMessages.TAG_EMPTY); + System.err.println(ErrorMessages.MISSING_TAG); } else { boolean hasOnlyOneTag = tags.length == 1; if (hasOnlyOneTag) { @@ -116,4 +116,10 @@ private void handleOnlyOneTag() { System.out.println("Tag " + tags[0] + " not deleted."); } 
} + + @Override + public Command validate() { + if (tags == null) exitWithError(ErrorMessages.MISSING_TAG); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/GrantPrivilegesToRole.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/GrantPrivilegesToRole.java index e3c9fa4944e..8630282ea60 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/GrantPrivilegesToRole.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/GrantPrivilegesToRole.java @@ -73,7 +73,7 @@ public void handle() { for (String privilege : privileges) { if (!Privileges.isValid(privilege)) { - System.err.println("Unknown privilege " + privilege); + System.err.println(ErrorMessages.UNKNOWN_PRIVILEGE + " " + privilege); return; } PrivilegeDTO privilegeDTO = @@ -103,4 +103,10 @@ public void handle() { String all = String.join(",", privileges); System.out.println(role + " granted " + all + " on " + entity.getName()); } + + @Override + public Command validate() { + if (privileges == null) exitWithError(ErrorMessages.MISSING_PRIVILEGES); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/LinkModel.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/LinkModel.java new file mode 100644 index 00000000000..cf34eae882a --- /dev/null +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/LinkModel.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.gravitino.cli.commands; + +/** Link a new model version to the registered model. */ +import java.util.Arrays; +import java.util.Map; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.cli.ErrorMessages; +import org.apache.gravitino.client.GravitinoClient; +import org.apache.gravitino.exceptions.ModelVersionAliasesAlreadyExistException; +import org.apache.gravitino.exceptions.NoSuchCatalogException; +import org.apache.gravitino.exceptions.NoSuchMetalakeException; +import org.apache.gravitino.exceptions.NoSuchModelException; +import org.apache.gravitino.exceptions.NoSuchSchemaException; +import org.apache.gravitino.model.ModelCatalog; + +public class LinkModel extends Command { + protected final String metalake; + protected final String catalog; + protected final String schema; + protected final String model; + protected final String uri; + protected final String[] alias; + protected final String comment; + protected final Map properties; + + /** + * Link a new model version to the registered model. + * + * @param url The URL of the Gravitino server. + * @param ignoreVersions If true don't check the client/server versions match. + * @param metalake The name of the metalake. + * @param catalog The name of the catalog. + * @param schema The name of schema. + * @param model The name of model. + * @param uri The URI of the model version artifact. + * @param alias The aliases of the model version. + * @param comment The comment of the model version. + * @param properties The properties of the model version. 
+ */ + public LinkModel( + String url, + boolean ignoreVersions, + String metalake, + String catalog, + String schema, + String model, + String uri, + String[] alias, + String comment, + Map properties) { + super(url, ignoreVersions); + this.metalake = metalake; + this.catalog = catalog; + this.schema = schema; + this.model = model; + this.uri = uri; + this.alias = alias; + this.comment = comment; + this.properties = properties; + } + + /** Link a new model version to the registered model. */ + @Override + public void handle() { + NameIdentifier name = NameIdentifier.of(schema, model); + + try { + GravitinoClient client = buildClient(metalake); + ModelCatalog modelCatalog = client.loadCatalog(catalog).asModelCatalog(); + modelCatalog.linkModelVersion(name, uri, alias, comment, properties); + } catch (NoSuchMetalakeException err) { + exitWithError(ErrorMessages.UNKNOWN_METALAKE); + } catch (NoSuchCatalogException err) { + exitWithError(ErrorMessages.UNKNOWN_CATALOG); + } catch (NoSuchSchemaException err) { + exitWithError(ErrorMessages.UNKNOWN_SCHEMA); + } catch (NoSuchModelException err) { + exitWithError(ErrorMessages.UNKNOWN_MODEL); + } catch (ModelVersionAliasesAlreadyExistException err) { + exitWithError(Arrays.toString(alias) + " already exist."); + } catch (Exception err) { + exitWithError(err.getMessage()); + } + + System.out.println( + "Linked model " + model + " to " + uri + " with aliases " + Arrays.toString(alias)); + } + + @Override + public Command validate() { + if (uri == null) exitWithError(ErrorMessages.MISSING_URI); + return super.validate(); + } +} diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RegisterModel.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RegisterModel.java new file mode 100644 index 00000000000..7c8cd120bf4 --- /dev/null +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RegisterModel.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under 
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.gravitino.cli.commands; + +import java.util.Map; +import org.apache.gravitino.NameIdentifier; +import org.apache.gravitino.cli.ErrorMessages; +import org.apache.gravitino.cli.Main; +import org.apache.gravitino.client.GravitinoClient; +import org.apache.gravitino.exceptions.ModelAlreadyExistsException; +import org.apache.gravitino.exceptions.NoSuchCatalogException; +import org.apache.gravitino.exceptions.NoSuchMetalakeException; +import org.apache.gravitino.exceptions.NoSuchSchemaException; +import org.apache.gravitino.model.Model; +import org.apache.gravitino.model.ModelCatalog; + +/** Register a model in the catalog */ +public class RegisterModel extends Command { + + protected final String metalake; + protected final String catalog; + protected final String schema; + protected final String model; + protected final String comment; + protected final Map properties; + + /** + * Register a model in the catalog + * + * @param url The URL of the Gravitino server. + * @param ignoreVersions If true don't check the client/server versions match. + * @param metalake The name of the metalake. + * @param catalog The name of the catalog. + * @param schema The name of schema. + * @param model The name of model. 
+ * @param comment The comment of the model version. + * @param properties The properties of the model version. + */ + public RegisterModel( + String url, + boolean ignoreVersions, + String metalake, + String catalog, + String schema, + String model, + String comment, + Map properties) { + super(url, ignoreVersions); + this.metalake = metalake; + this.catalog = catalog; + this.schema = schema; + this.model = model; + this.comment = comment; + this.properties = properties; + } + + /** Register a model in the catalog */ + @Override + public void handle() { + NameIdentifier name = NameIdentifier.of(schema, model); + Model registeredModel = null; + + try { + GravitinoClient client = buildClient(metalake); + ModelCatalog modelCatalog = client.loadCatalog(catalog).asModelCatalog(); + registeredModel = modelCatalog.registerModel(name, comment, properties); + } catch (NoSuchMetalakeException err) { + exitWithError(ErrorMessages.UNKNOWN_METALAKE); + } catch (NoSuchCatalogException err) { + exitWithError(ErrorMessages.UNKNOWN_CATALOG); + } catch (NoSuchSchemaException err) { + exitWithError(ErrorMessages.UNKNOWN_SCHEMA); + } catch (ModelAlreadyExistsException err) { + exitWithError(ErrorMessages.MODEL_EXISTS); + } catch (Exception err) { + exitWithError(err.getMessage()); + } + + if (registeredModel != null) { + System.out.println("Successful register " + registeredModel.name() + "."); + } else { + System.err.println(ErrorMessages.REGISTER_FAILED + model + "."); + Main.exit(-1); + } + } +} diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveAllTags.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveAllTags.java index a7aa3748a15..5221100a8e9 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveAllTags.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveAllTags.java @@ -118,4 +118,10 @@ public void handle() { System.out.println(entity + " has no tags"); } } + + @Override + 
public Command validate() { + if (name == null || !name.hasName()) exitWithError(ErrorMessages.MISSING_NAME); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveCatalogProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveCatalogProperty.java index a460d91b2fe..dc1a76765b1 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveCatalogProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveCatalogProperty.java @@ -66,4 +66,10 @@ public void handle() { System.out.println(property + " property removed."); } + + @Override + public Command validate() { + validateProperty(property); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveFilesetProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveFilesetProperty.java index 00deebe265a..c443bf0fdfe 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveFilesetProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveFilesetProperty.java @@ -86,4 +86,10 @@ public void handle() { System.out.println(property + " property removed."); } + + @Override + public Command validate() { + validateProperty(property); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveMetalakeProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveMetalakeProperty.java index 9642456f375..ce3a50fee16 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveMetalakeProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveMetalakeProperty.java @@ -60,4 +60,10 @@ public void handle() { System.out.println(property + " property removed."); } + + @Override + public Command validate() { + validateProperty(property); + return super.validate(); + } } 
diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveSchemaProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveSchemaProperty.java index 6fc41c01252..8fedcb62168 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveSchemaProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveSchemaProperty.java @@ -77,4 +77,10 @@ public void handle() { System.out.println(property + " property removed."); } + + @Override + public Command validate() { + validateProperty(property); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveTableProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveTableProperty.java index 8b3cd2383fb..af370ce64b7 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveTableProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveTableProperty.java @@ -86,4 +86,10 @@ public void handle() { System.out.println(property + " property removed."); } + + @Override + public Command validate() { + validateProperty(property); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveTopicProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveTopicProperty.java index a43820933e8..51be0a139d9 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveTopicProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RemoveTopicProperty.java @@ -87,4 +87,10 @@ public void handle() { System.out.println(property + " property removed."); } + + @Override + public Command validate() { + validateProperty(property); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RevokePrivilegesFromRole.java 
b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RevokePrivilegesFromRole.java index 8077532319e..3bfa7cd4526 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RevokePrivilegesFromRole.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/RevokePrivilegesFromRole.java @@ -73,7 +73,7 @@ public void handle() { for (String privilege : privileges) { if (!Privileges.isValid(privilege)) { - System.err.println("Unknown privilege " + privilege); + System.err.println(ErrorMessages.UNKNOWN_PRIVILEGE + " " + privilege); return; } PrivilegeDTO privilegeDTO = @@ -103,4 +103,10 @@ public void handle() { String all = String.join(",", privileges); System.out.println(role + " revoked " + all + " on " + entity.getName()); } + + @Override + public Command validate() { + if (privileges == null) exitWithError(ErrorMessages.MISSING_PRIVILEGES); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetCatalogProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetCatalogProperty.java index 21b1a6f1c9f..034b1b8e2a3 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetCatalogProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetCatalogProperty.java @@ -74,4 +74,10 @@ public void handle() { System.out.println(catalog + " property set."); } + + @Override + public Command validate() { + validatePropertyAndValue(property, value); + return this; + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetFilesetProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetFilesetProperty.java index 2c179db104c..afafa3c9dbd 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetFilesetProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetFilesetProperty.java @@ -90,4 +90,10 @@ public void handle() { System.out.println(schema + " 
property set."); } + + @Override + public Command validate() { + validatePropertyAndValue(property, value); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetMetalakeProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetMetalakeProperty.java index 817beaec91e..ef67d008bc8 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetMetalakeProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetMetalakeProperty.java @@ -63,4 +63,10 @@ public void handle() { System.out.println(metalake + " property set."); } + + @Override + public Command validate() { + validatePropertyAndValue(property, value); + return this; + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetSchemaProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetSchemaProperty.java index cc6151eaa2c..bd9851ba8cb 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetSchemaProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetSchemaProperty.java @@ -81,4 +81,10 @@ public void handle() { System.out.println(schema + " property set."); } + + @Override + public Command validate() { + validatePropertyAndValue(property, value); + return this; + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTableProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTableProperty.java index 0209d218250..54ab88f3435 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTableProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTableProperty.java @@ -90,4 +90,10 @@ public void handle() { System.out.println(table + " property set."); } + + @Override + public Command validate() { + validatePropertyAndValue(property, value); + return super.validate(); + } } diff --git 
a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTagProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTagProperty.java index b5b46b59a71..da7a267b8d4 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTagProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTagProperty.java @@ -74,4 +74,10 @@ public void handle() { System.out.println(tag + " property set."); } + + @Override + public Command validate() { + validatePropertyAndValue(property, value); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTopicProperty.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTopicProperty.java index 941c0b0321e..2641259cdde 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTopicProperty.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/SetTopicProperty.java @@ -92,4 +92,10 @@ public void handle() { System.out.println(property + " property set."); } + + @Override + public Command validate() { + validatePropertyAndValue(property, value); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/TagEntity.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/TagEntity.java index 7bc8ec37649..4a06918850d 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/TagEntity.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/TagEntity.java @@ -105,4 +105,10 @@ public void handle() { System.out.println(entity + " now tagged with " + all); } + + @Override + public Command validate() { + if (name == null || !name.hasName()) exitWithError(ErrorMessages.MISSING_NAME); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/UntagEntity.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/UntagEntity.java index 
8f4a4a9cf02..3503d5eb7bf 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/UntagEntity.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/UntagEntity.java @@ -113,4 +113,10 @@ public void handle() { System.out.println(entity + " removed tag " + tags[0].toString() + " now tagged with " + all); } } + + @Override + public Command validate() { + if (name == null || !name.hasName()) exitWithError(ErrorMessages.MISSING_NAME); + return super.validate(); + } } diff --git a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/UpdateColumnDefault.java b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/UpdateColumnDefault.java index 7c7c2d3b402..976cf623054 100644 --- a/clients/cli/src/main/java/org/apache/gravitino/cli/commands/UpdateColumnDefault.java +++ b/clients/cli/src/main/java/org/apache/gravitino/cli/commands/UpdateColumnDefault.java @@ -103,4 +103,10 @@ public void handle() { System.out.println(column + " default changed."); } + + @Override + public Command validate() { + if (dataType == null) exitWithError(ErrorMessages.MISSING_DATATYPE); + return super.validate(); + } } diff --git a/clients/cli/src/main/resources/model_help.txt b/clients/cli/src/main/resources/model_help.txt index 04e9b8262ef..7becf2fd55d 100644 --- a/clients/cli/src/main/resources/model_help.txt +++ b/clients/cli/src/main/resources/model_help.txt @@ -1,8 +1,41 @@ -gcli model [details] +gcli model [list|details|create|update|delete] Please set the metalake in the Gravitino configuration file or the environment variable before running any of these commands. 
Example commands +Register a model +gcli model create --name hadoop.schema.model + +Register a model with comment +gcli model create --name hadoop.schema.model --comment comment + +Register a model with properties +gcli model create --name hadoop.schema.model --properties key1=val1 key2=val2 + +Register a model with properties and comment +gcli model create --name hadoop.schema.model --properties key1=val1 key2=val2 --comment comment + +List models +gcli model list --name hadoop.schema + +Show a model's details +gcli model details --name hadoop.schema.model + Show model audit information -gcli model details --name catalog_postgres.hr --audit \ No newline at end of file +gcli model details --name hadoop.schema.model --audit + +Link a model +gcli model update --name hadoop.schema.model --uri file:///tmp/file + +Link a model with alias +gcli model update --name hadoop.schema.model --uri file:///tmp/file --alias aliasA aliasB + +Link a model with all components +gcli model update --name hadoop.schema.model --uri file:///tmp/file --alias aliasA aliasB --comment comment --properties key1=val1 key2=val2 + +Link a model without uri +gcli model update --name hadoop.schema.model + +Delete a model +gcli model delete --name hadoop.schema.model \ No newline at end of file diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestCatalogCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestCatalogCommands.java index 44e5537955f..afa19b94c5a 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestCatalogCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestCatalogCommands.java @@ -92,6 +92,7 @@ void testListCatalogsCommand() { doReturn(mockList) .when(commandLine) .newListCatalogs(GravitinoCommandLine.DEFAULT_URL, false, null, "metalake_demo"); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -112,6 +113,7 @@ void testCatalogDetailsCommand() {
.when(commandLine) .newCatalogDetails( GravitinoCommandLine.DEFAULT_URL, false, null, "metalake_demo", "catalog"); + doReturn(mockDetails).when(mockDetails).validate(); commandLine.handleCommandLine(); verify(mockDetails).handle(); } @@ -131,6 +133,7 @@ void testCatalogAuditCommand() { doReturn(mockAudit) .when(commandLine) .newCatalogAudit(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog"); + doReturn(mockAudit).when(mockAudit).validate(); commandLine.handleCommandLine(); verify(mockAudit).handle(); } @@ -167,10 +170,30 @@ void testCreateCatalogCommand() { "postgres", "comment", map); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } + @Test + void testCreateCatalogCommandWithoutProvider() { + Main.useExit = false; + CreateCatalog mockCreateCatalog = + spy( + new CreateCatalog( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + null, + "comment", + null)); + + assertThrows(RuntimeException.class, mockCreateCatalog::validate); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROVIDER, errOutput); + } + @Test void testDeleteCatalogCommand() { DeleteCatalog mockDelete = mock(DeleteCatalog.class); @@ -186,6 +209,7 @@ void testDeleteCatalogCommand() { .when(commandLine) .newDeleteCatalog( GravitinoCommandLine.DEFAULT_URL, false, false, "metalake_demo", "catalog"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -206,6 +230,7 @@ void testDeleteCatalogForceCommand() { .when(commandLine) .newDeleteCatalog( GravitinoCommandLine.DEFAULT_URL, false, true, "metalake_demo", "catalog"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -234,10 +259,60 @@ void testSetCatalogPropertyCommand() { "catalog", "property", "value"); + 
doReturn(mockSetProperty).when(mockSetProperty).validate(); commandLine.handleCommandLine(); verify(mockSetProperty).handle(); } + @Test + void testSetCatalogPropertyCommandWithoutPropertyAndValue() { + Main.useExit = false; + SetCatalogProperty mockSetProperty = + spy( + new SetCatalogProperty( + GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", null, null)); + + assertThrows(RuntimeException.class, mockSetProperty::validate); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals("Missing --property and --value options.", errOutput); + } + + @Test + void testSetCatalogPropertyCommandWithoutProperty() { + Main.useExit = false; + SetCatalogProperty mockSetProperty = + spy( + new SetCatalogProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + null, + "value")); + + assertThrows(RuntimeException.class, mockSetProperty::validate); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, errOutput); + } + + @Test + void testSetCatalogPropertyCommandWithoutValue() { + Main.useExit = false; + SetCatalogProperty mockSetProperty = + spy( + new SetCatalogProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "property", + null)); + + assertThrows(RuntimeException.class, mockSetProperty::validate); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_VALUE, errOutput); + } + @Test void testRemoveCatalogPropertyCommand() { RemoveCatalogProperty mockRemoveProperty = mock(RemoveCatalogProperty.class); @@ -255,10 +330,24 @@ void testRemoveCatalogPropertyCommand() { .when(commandLine) .newRemoveCatalogProperty( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "property"); + doReturn(mockRemoveProperty).when(mockRemoveProperty).validate(); commandLine.handleCommandLine(); 
verify(mockRemoveProperty).handle(); } + @Test + void testRemoveCatalogPropertyCommandWithoutProperty() { + Main.useExit = false; + RemoveCatalogProperty mockRemoveProperty = + spy( + new RemoveCatalogProperty( + GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", null)); + + assertThrows(RuntimeException.class, mockRemoveProperty::validate); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, errOutput); + } + @Test void testListCatalogPropertiesCommand() { ListCatalogProperties mockListProperties = mock(ListCatalogProperties.class); @@ -274,6 +363,7 @@ void testListCatalogPropertiesCommand() { .when(commandLine) .newListCatalogProperties( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog"); + doReturn(mockListProperties).when(mockListProperties).validate(); commandLine.handleCommandLine(); verify(mockListProperties).handle(); } @@ -295,6 +385,7 @@ void testUpdateCatalogCommentCommand() { .when(commandLine) .newUpdateCatalogComment( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "new comment"); + doReturn(mockUpdateComment).when(mockUpdateComment).validate(); commandLine.handleCommandLine(); verify(mockUpdateComment).handle(); } @@ -317,6 +408,7 @@ void testUpdateCatalogNameCommand() { .when(commandLine) .newUpdateCatalogName( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "new_name"); + doReturn(mockUpdateName).when(mockUpdateName).validate(); commandLine.handleCommandLine(); verify(mockUpdateName).handle(); } @@ -345,9 +437,9 @@ void testCatalogDetailsCommandWithoutCatalog() { String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); assertEquals( output, - "Missing --name option." 
+ ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + CommandEntities.CATALOG); } @@ -368,6 +460,7 @@ void testEnableCatalogCommand() { .when(commandLine) .newCatalogEnable( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", false); + doReturn(mockEnable).when(mockEnable).validate(); commandLine.handleCommandLine(); verify(mockEnable).handle(); } @@ -390,6 +483,7 @@ void testEnableCatalogCommandWithRecursive() { .when(commandLine) .newCatalogEnable( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", true); + doReturn(mockEnable).when(mockEnable).validate(); commandLine.handleCommandLine(); verify(mockEnable).handle(); } @@ -410,6 +504,7 @@ void testDisableCatalogCommand() { doReturn(mockDisable) .when(commandLine) .newCatalogDisable(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog"); + doReturn(mockDisable).when(mockDisable).validate(); commandLine.handleCommandLine(); verify(mockDisable).handle(); } @@ -436,6 +531,6 @@ void testCatalogWithDisableAndEnableOptions() { GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", false); verify(commandLine, never()) .newCatalogDisable(GravitinoCommandLine.DEFAULT_URL, false, "melake_demo", "catalog"); - assertTrue(errContent.toString().contains("Unable to enable and disable at the same time")); + assertTrue(errContent.toString().contains(ErrorMessages.INVALID_ENABLE_DISABLE)); } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestColumnCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestColumnCommands.java index b6159343ef0..31a3139482c 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestColumnCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestColumnCommands.java @@ -93,6 +93,7 @@ void testListColumnsCommand() { .when(commandLine) .newListColumns( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", 
"users"); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -120,6 +121,7 @@ void testColumnAuditCommand() { "schema", "users", "name"); + doReturn(mockAudit).when(mockAudit).validate(); commandLine.handleCommandLine(); verify(mockAudit).handle(); } @@ -187,6 +189,7 @@ void testAddColumn() { true, false, null); + doReturn(mockAddColumn).when(mockAddColumn).validate(); commandLine.handleCommandLine(); verify(mockAddColumn).handle(); } @@ -214,6 +217,7 @@ void testDeleteColumn() { "schema", "users", "name"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -246,6 +250,7 @@ void testUpdateColumnComment() { "users", "name", "new comment"); + doReturn(mockUpdateColumn).when(mockUpdateColumn).validate(); commandLine.handleCommandLine(); verify(mockUpdateColumn).handle(); } @@ -278,6 +283,7 @@ void testUpdateColumnName() { "users", "name", "renamed"); + doReturn(mockUpdateName).when(mockUpdateName).validate(); commandLine.handleCommandLine(); verify(mockUpdateName).handle(); } @@ -310,6 +316,7 @@ void testUpdateColumnDatatype() { "users", "name", "varchar(250)"); + doReturn(mockUpdateDatatype).when(mockUpdateDatatype).validate(); commandLine.handleCommandLine(); verify(mockUpdateDatatype).handle(); } @@ -342,6 +349,7 @@ void testUpdateColumnPosition() { "users", "name", "first"); + doReturn(mockUpdatePosition).when(mockUpdatePosition).validate(); commandLine.handleCommandLine(); verify(mockUpdatePosition).handle(); } @@ -373,6 +381,7 @@ void testUpdateColumnNullability() { "users", "name", true); + doReturn(mockUpdateNull).when(mockUpdateNull).validate(); commandLine.handleCommandLine(); verify(mockUpdateNull).handle(); } @@ -404,6 +413,7 @@ void testUpdateColumnAutoIncrement() { "users", "name", true); + doReturn(mockUpdateAuto).when(mockUpdateAuto).validate(); commandLine.handleCommandLine(); verify(mockUpdateAuto).handle(); } @@ -439,10 
+449,33 @@ void testUpdateColumnDefault() { "name", "Fred Smith", "varchar(100)"); + doReturn(mockUpdateDefault).when(mockUpdateDefault).validate(); commandLine.handleCommandLine(); verify(mockUpdateDefault).handle(); } + @Test + void testUpdateColumnDefaultWithoutDataType() { + Main.useExit = false; + UpdateColumnDefault spyUpdate = + spy( + new UpdateColumnDefault( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "user", + "name", + "", + null)); + + assertThrows(RuntimeException.class, spyUpdate::validate); + verify(spyUpdate, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_DATATYPE, output); + } + @Test @SuppressWarnings("DefaultCharset") void testDeleteColumnCommandWithoutCatalog() { @@ -464,7 +497,7 @@ void testDeleteColumnCommandWithoutCatalog() { output, ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ") .join( Arrays.asList( @@ -496,7 +529,7 @@ void testDeleteColumnCommandWithoutSchema() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ") .join( Arrays.asList( @@ -531,7 +564,7 @@ void testDeleteColumnCommandWithoutTable() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.TABLE, CommandEntities.COLUMN))); } @@ -563,7 +596,7 @@ void testDeleteColumnCommandWithoutColumn() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.COLUMN))); } @@ -588,7 +621,7 @@ void testListColumnCommandWithoutCatalog() { output, ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ") .join( 
Arrays.asList( @@ -617,7 +650,7 @@ void testListColumnCommandWithoutSchema() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.SCHEMA, CommandEntities.TABLE))); } @@ -643,7 +676,7 @@ void testListColumnCommandWithoutTable() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + CommandEntities.TABLE); } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestFilesetCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestFilesetCommands.java index b46b73cc3dd..3529e60bf77 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestFilesetCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestFilesetCommands.java @@ -92,6 +92,7 @@ void testListFilesetsCommand() { .when(commandLine) .newListFilesets( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema"); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -117,6 +118,7 @@ void testFilesetDetailsCommand() { "catalog", "schema", "fileset"); + doReturn(mockDetails).when(mockDetails).validate(); commandLine.handleCommandLine(); verify(mockDetails).handle(); } @@ -147,6 +149,7 @@ void testCreateFilesetCommand() { eq("fileset"), eq("comment"), any()); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -173,6 +176,7 @@ void testDeleteFilesetCommand() { "catalog", "schema", "fileset"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -200,6 +204,7 @@ void testDeleteFilesetForceCommand() { "catalog", "schema", "fileset"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -229,6 +234,7 @@ void 
testUpdateFilesetCommentCommand() { "schema", "fileset", "new_comment"); + doReturn(mockUpdateComment).when(mockUpdateComment).validate(); commandLine.handleCommandLine(); verify(mockUpdateComment).handle(); } @@ -258,6 +264,7 @@ void testUpdateFilesetNameCommand() { "schema", "fileset", "new_name"); + doReturn(mockUpdateName).when(mockUpdateName).validate(); commandLine.handleCommandLine(); verify(mockUpdateName).handle(); } @@ -284,6 +291,7 @@ void testListFilesetPropertiesCommand() { "catalog", "schema", "fileset"); + doReturn(mockListProperties).when(mockListProperties).validate(); commandLine.handleCommandLine(); verify(mockListProperties).handle(); } @@ -316,10 +324,74 @@ void testSetFilesetPropertyCommand() { "fileset", "property", "value"); + doReturn(mockSetProperties).when(mockSetProperties).validate(); commandLine.handleCommandLine(); verify(mockSetProperties).handle(); } + @Test + void testSetFilesetPropertyCommandWithoutPropertyAndValue() { + Main.useExit = false; + SetFilesetProperty spySetProperty = + spy( + new SetFilesetProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "fileset", + null, + null)); + + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY_AND_VALUE, errOutput); + } + + @Test + void testSetFilesetPropertyCommandWithoutProperty() { + Main.useExit = false; + SetFilesetProperty spySetProperty = + spy( + new SetFilesetProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "fileset", + null, + "value")); + + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, 
errOutput); + } + + @Test + void testSetFilesetPropertyCommandWithoutValue() { + Main.useExit = false; + SetFilesetProperty spySetProperty = + spy( + new SetFilesetProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "fileset", + "property", + null)); + + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_VALUE, errOutput); + } + @Test void testRemoveFilesetPropertyCommand() { RemoveFilesetProperty mockSetProperties = mock(RemoveFilesetProperty.class); @@ -345,10 +417,31 @@ void testRemoveFilesetPropertyCommand() { "schema", "fileset", "property"); + doReturn(mockSetProperties).when(mockSetProperties).validate(); commandLine.handleCommandLine(); verify(mockSetProperties).handle(); } + @Test + void testRemoveFilesetPropertyCommandWithoutProperty() { + Main.useExit = false; + RemoveFilesetProperty spyRemoveProperty = + spy( + new RemoveFilesetProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "fileset", + null)); + + assertThrows(RuntimeException.class, spyRemoveProperty::validate); + verify(spyRemoveProperty, never()).handle(); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, errOutput); + } + @Test @SuppressWarnings("DefaultCharset") void testListFilesetCommandWithoutCatalog() { @@ -369,7 +462,7 @@ void testListFilesetCommandWithoutCatalog() { output, ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.CATALOG, CommandEntities.SCHEMA))); } @@ -394,7 +487,7 @@ void testListFilesetCommandWithoutSchema() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + 
ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.SCHEMA))); } @@ -419,7 +512,7 @@ void testFilesetDetailCommandWithoutCatalog() { output, ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ") .join( Arrays.asList( @@ -448,7 +541,7 @@ void testFilesetDetailCommandWithoutSchema() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.SCHEMA, CommandEntities.FILESET))); } @@ -474,7 +567,7 @@ void testFilesetDetailCommandWithoutFileset() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.FILESET))); } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestFulllName.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestFulllName.java index 48ee79cfcc5..f13d6e09201 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestFulllName.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestFulllName.java @@ -47,6 +47,7 @@ public class TestFulllName { @BeforeEach public void setUp() { + Main.useExit = false; options = new GravitinoOptions().options(); System.setOut(new PrintStream(outContent)); System.setErr(new PrintStream(errContent)); @@ -82,8 +83,7 @@ public void entityNotFound() throws Exception { CommandLine commandLine = new DefaultParser().parse(options, args); FullName fullName = new FullName(commandLine); - String metalakeName = fullName.getMetalakeName(); - assertNull(metalakeName); + assertThrows(RuntimeException.class, fullName::getMetalakeName); } @Test @@ -231,8 +231,7 @@ public void testGetMetalakeWithoutMetalakeOption() throws ParseException { String[] args = {"table", "list", "-i", "--name", "Hive_catalog.default"}; CommandLine commandLine = new DefaultParser().parse(options, 
args); FullName fullName = new FullName(commandLine); - String metalakeName = fullName.getMetalakeName(); - assertNull(metalakeName); + assertThrows(RuntimeException.class, fullName::getMetalakeName); String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); assertEquals(errOutput, ErrorMessages.MISSING_METALAKE); } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestGroupCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestGroupCommands.java index 98e3ea910fb..ce7a8956821 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestGroupCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestGroupCommands.java @@ -83,6 +83,7 @@ void testListGroupsCommand() { doReturn(mockList) .when(commandLine) .newListGroups(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo"); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -101,6 +102,7 @@ void testGroupDetailsCommand() { doReturn(mockDetails) .when(commandLine) .newGroupDetails(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "groupA"); + doReturn(mockDetails).when(mockDetails).validate(); commandLine.handleCommandLine(); verify(mockDetails).handle(); } @@ -120,6 +122,7 @@ void testGroupAuditCommand() { doReturn(mockAudit) .when(commandLine) .newGroupAudit(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "group"); + doReturn(mockAudit).when(mockAudit).validate(); commandLine.handleCommandLine(); verify(mockAudit).handle(); } @@ -138,6 +141,7 @@ void testCreateGroupCommand() { doReturn(mockCreate) .when(commandLine) .newCreateGroup(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "groupA"); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -156,6 +160,7 @@ void testDeleteGroupCommand() { doReturn(mockDelete) .when(commandLine) .newDeleteGroup(GravitinoCommandLine.DEFAULT_URL, 
false, false, "metalake_demo", "groupA"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -175,6 +180,7 @@ void testDeleteGroupForceCommand() { doReturn(mockDelete) .when(commandLine) .newDeleteGroup(GravitinoCommandLine.DEFAULT_URL, false, true, "metalake_demo", "groupA"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -246,6 +252,8 @@ void testRemoveRolesFromGroupCommand() { .newRemoveRoleFromGroup( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "groupA", "role1"); + doReturn(mockRemoveFirstRole).when(mockRemoveFirstRole).validate(); + doReturn(mockRemoveSecondRole).when(mockRemoveSecondRole).validate(); commandLine.handleCommandLine(); verify(mockRemoveFirstRole).handle(); @@ -279,6 +287,8 @@ void testAddRolesToGroupCommand() { .newAddRoleToGroup( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "groupA", "role1"); + doReturn(mockAddFirstRole).when(mockAddFirstRole).validate(); + doReturn(mockAddSecondRole).when(mockAddSecondRole).validate(); commandLine.handleCommandLine(); verify(mockAddSecondRole).handle(); diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestMain.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestMain.java index 1d1ffded0ff..c9cd437cf3d 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestMain.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestMain.java @@ -189,7 +189,7 @@ public void CreateTagWithNoTag() { Main.main(args); - assertTrue(errContent.toString().contains(ErrorMessages.TAG_EMPTY)); // Expect error + assertTrue(errContent.toString().contains(ErrorMessages.MISSING_TAG)); // Expect error } @SuppressWarnings("DefaultCharset") @@ -198,6 +198,6 @@ public void DeleteTagWithNoTag() { Main.main(args); - assertTrue(errContent.toString().contains(ErrorMessages.TAG_EMPTY)); // Expect error + 
assertTrue(errContent.toString().contains(ErrorMessages.MISSING_TAG)); // Expect error } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestMetalakeCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestMetalakeCommands.java index 01eebb6dab5..dae2fe63400 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestMetalakeCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestMetalakeCommands.java @@ -19,6 +19,8 @@ package org.apache.gravitino.cli; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -29,6 +31,7 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Options; import org.apache.gravitino.cli.commands.CreateMetalake; @@ -87,6 +90,7 @@ void testListMetalakesCommand() { doReturn(mockList) .when(commandLine) .newListMetalakes(GravitinoCommandLine.DEFAULT_URL, false, null); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -104,6 +108,7 @@ void testMetalakeDetailsCommand() { doReturn(mockDetails) .when(commandLine) .newMetalakeDetails(GravitinoCommandLine.DEFAULT_URL, false, null, "metalake_demo"); + doReturn(mockDetails).when(mockDetails).validate(); commandLine.handleCommandLine(); verify(mockDetails).handle(); } @@ -121,6 +126,7 @@ void testMetalakeAuditCommand() { doReturn(mockAudit) .when(commandLine) .newMetalakeAudit(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo"); + doReturn(mockAudit).when(mockAudit).validate(); commandLine.handleCommandLine(); verify(mockAudit).handle(); } @@ -139,6 +145,7 @@ void testCreateMetalakeCommand() { doReturn(mockCreate) .when(commandLine) .newCreateMetalake(GravitinoCommandLine.DEFAULT_URL, 
false, "metalake_demo", "comment"); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -155,6 +162,7 @@ void testCreateMetalakeCommandNoComment() { doReturn(mockCreate) .when(commandLine) .newCreateMetalake(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", null); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -171,6 +179,7 @@ void testDeleteMetalakeCommand() { doReturn(mockDelete) .when(commandLine) .newDeleteMetalake(GravitinoCommandLine.DEFAULT_URL, false, false, "metalake_demo"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -188,6 +197,7 @@ void testDeleteMetalakeForceCommand() { doReturn(mockDelete) .when(commandLine) .newDeleteMetalake(GravitinoCommandLine.DEFAULT_URL, false, true, "metalake_demo"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -209,10 +219,50 @@ void testSetMetalakePropertyCommand() { .when(commandLine) .newSetMetalakeProperty( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "property", "value"); + doReturn(mockSetProperty).when(mockSetProperty).validate(); commandLine.handleCommandLine(); verify(mockSetProperty).handle(); } + @Test + void testSetMetalakePropertyCommandWithoutPropertyAndValue() { + Main.useExit = false; + SetMetalakeProperty metalakeProperty = + spy( + new SetMetalakeProperty( + GravitinoCommandLine.DEFAULT_URL, false, "demo_metalake", null, null)); + + assertThrows(RuntimeException.class, metalakeProperty::validate); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals("Missing --property and --value options.", errOutput); + } + + @Test + void testSetMetalakePropertyCommandWithoutProperty() { + Main.useExit = false; + SetMetalakeProperty metalakeProperty = + spy( + new 
SetMetalakeProperty( + GravitinoCommandLine.DEFAULT_URL, false, "demo_metalake", null, "val1")); + + assertThrows(RuntimeException.class, metalakeProperty::validate); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, errOutput); + } + + @Test + void testSetMetalakePropertyCommandWithoutValue() { + Main.useExit = false; + SetMetalakeProperty metalakeProperty = + spy( + new SetMetalakeProperty( + GravitinoCommandLine.DEFAULT_URL, false, "demo_metalake", "property1", null)); + + assertThrows(RuntimeException.class, metalakeProperty::validate); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_VALUE, errOutput); + } + @Test void testRemoveMetalakePropertyCommand() { RemoveMetalakeProperty mockRemoveProperty = mock(RemoveMetalakeProperty.class); @@ -228,10 +278,24 @@ void testRemoveMetalakePropertyCommand() { .when(commandLine) .newRemoveMetalakeProperty( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "property"); + doReturn(mockRemoveProperty).when(mockRemoveProperty).validate(); commandLine.handleCommandLine(); verify(mockRemoveProperty).handle(); } + @Test + void testRemoveMetalakePropertyCommandWithoutProperty() { + Main.useExit = false; + RemoveMetalakeProperty mockRemoveProperty = + spy( + new RemoveMetalakeProperty( + GravitinoCommandLine.DEFAULT_URL, false, "demo_metalake", null)); + + assertThrows(RuntimeException.class, mockRemoveProperty::validate); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, errOutput); + } + @Test void testListMetalakePropertiesCommand() { ListMetalakeProperties mockListProperties = mock(ListMetalakeProperties.class); @@ -244,6 +308,7 @@ void testListMetalakePropertiesCommand() { doReturn(mockListProperties) .when(commandLine) 
.newListMetalakeProperties(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo"); + doReturn(mockListProperties).when(mockListProperties).validate(); commandLine.handleCommandLine(); verify(mockListProperties).handle(); } @@ -263,6 +328,7 @@ void testUpdateMetalakeCommentCommand() { .when(commandLine) .newUpdateMetalakeComment( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "new comment"); + doReturn(mockUpdateComment).when(mockUpdateComment).validate(); commandLine.handleCommandLine(); verify(mockUpdateComment).handle(); } @@ -282,6 +348,7 @@ void testUpdateMetalakeNameCommand() { .when(commandLine) .newUpdateMetalakeName( GravitinoCommandLine.DEFAULT_URL, false, false, "metalake_demo", "new_name"); + doReturn(mockUpdateName).when(mockUpdateName).validate(); commandLine.handleCommandLine(); verify(mockUpdateName).handle(); } @@ -302,6 +369,7 @@ void testUpdateMetalakeNameForceCommand() { .when(commandLine) .newUpdateMetalakeName( GravitinoCommandLine.DEFAULT_URL, false, true, "metalake_demo", "new_name"); + doReturn(mockUpdateName).when(mockUpdateName).validate(); commandLine.handleCommandLine(); verify(mockUpdateName).handle(); } @@ -319,6 +387,7 @@ void testEnableMetalakeCommand() { doReturn(mockEnable) .when(commandLine) .newMetalakeEnable(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", false); + doReturn(mockEnable).when(mockEnable).validate(); commandLine.handleCommandLine(); verify(mockEnable).handle(); } @@ -337,6 +406,7 @@ void testEnableMetalakeCommandWithRecursive() { doReturn(mockEnable) .when(commandLine) .newMetalakeEnable(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", true); + doReturn(mockEnable).when(mockEnable).validate(); commandLine.handleCommandLine(); verify(mockEnable).handle(); } @@ -355,7 +425,7 @@ void testDisableMetalakeCommand() { doReturn(mockDisable) .when(commandLine) .newMetalakeDisable(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo"); - + 
doReturn(mockDisable).when(mockDisable).validate(); commandLine.handleCommandLine(); verify(mockDisable).handle(); } @@ -379,6 +449,6 @@ void testMetalakeWithDisableAndEnableOptions() { .newMetalakeEnable(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", false); verify(commandLine, never()) .newMetalakeEnable(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", false); - assertTrue(errContent.toString().contains("Unable to enable and disable at the same time")); + assertTrue(errContent.toString().contains(ErrorMessages.INVALID_ENABLE_DISABLE)); } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestModelCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestModelCommands.java index e486c41a9d1..b83cc3c3136 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestModelCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestModelCommands.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.doReturn; @@ -35,11 +36,15 @@ import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; +import java.util.Map; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Options; +import org.apache.gravitino.cli.commands.DeleteModel; +import org.apache.gravitino.cli.commands.LinkModel; import org.apache.gravitino.cli.commands.ListModel; import org.apache.gravitino.cli.commands.ModelAudit; import org.apache.gravitino.cli.commands.ModelDetails; +import org.apache.gravitino.cli.commands.RegisterModel; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -89,6 +94,7 @@ void testListModelCommand() { eq("metalake_demo"), eq("catalog"), eq("schema")); + 
doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -116,7 +122,7 @@ void testListModelCommandWithoutCatalog() { assertEquals( ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + joiner.join(Arrays.asList(CommandEntities.CATALOG, CommandEntities.SCHEMA)), output); } @@ -145,7 +151,7 @@ void testListModelCommandWithoutSchema() { assertEquals( ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + joiner.join(Collections.singletonList(CommandEntities.SCHEMA)), output); } @@ -171,6 +177,7 @@ void testModelDetailsCommand() { eq("catalog"), eq("schema"), eq("model")); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -200,7 +207,7 @@ void testModelDetailsCommandWithoutCatalog() { assertEquals( ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + joiner.join( Arrays.asList( CommandEntities.CATALOG, CommandEntities.SCHEMA, CommandEntities.MODEL)), @@ -233,7 +240,7 @@ void testModelDetailsCommandWithoutSchema() { assertEquals( ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + joiner.join(Arrays.asList(CommandEntities.SCHEMA, CommandEntities.MODEL)), output); } @@ -264,7 +271,7 @@ void testModelDetailsCommandWithoutModel() { assertEquals( ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + joiner.join(Collections.singletonList(CommandEntities.MODEL)), output); } @@ -286,7 +293,314 @@ void testModelAuditCommand() { .when(commandLine) .newModelAudit( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", "model"); + doReturn(mockAudit).when(mockAudit).validate(); commandLine.handleCommandLine(); verify(mockAudit).handle(); } + + @Test + void 
testRegisterModelCommand() { + RegisterModel mockCreate = mock(RegisterModel.class); + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.NAME)).thenReturn("catalog.schema.model"); + when(mockCommandLine.hasOption(GravitinoOptions.PROPERTIES)).thenReturn(false); + when(mockCommandLine.hasOption(GravitinoOptions.COMMENT)).thenReturn(false); + + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.MODEL, CommandActions.CREATE)); + doReturn(mockCreate) + .when(commandLine) + .newCreateModel( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + eq("catalog"), + eq("schema"), + eq("model"), + isNull(), + argThat(Map::isEmpty)); + doReturn(mockCreate).when(mockCreate).validate(); + commandLine.handleCommandLine(); + verify(mockCreate).handle(); + } + + @Test + void testRegisterModelCommandWithComment() { + RegisterModel mockCreate = mock(RegisterModel.class); + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.NAME)).thenReturn("catalog.schema.model"); + when(mockCommandLine.hasOption(GravitinoOptions.PROPERTIES)).thenReturn(false); + when(mockCommandLine.hasOption(GravitinoOptions.COMMENT)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.COMMENT)).thenReturn("comment"); + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.MODEL, CommandActions.CREATE)); + doReturn(mockCreate) + .when(commandLine) + 
.newCreateModel( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + eq("catalog"), + eq("schema"), + eq("model"), + eq("comment"), + argThat(Map::isEmpty)); + doReturn(mockCreate).when(mockCreate).validate(); + commandLine.handleCommandLine(); + verify(mockCreate).handle(); + } + + @Test + void testRegisterModelCommandWithProperties() { + RegisterModel mockCreate = mock(RegisterModel.class); + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.NAME)).thenReturn("catalog.schema.model"); + when(mockCommandLine.hasOption(GravitinoOptions.PROPERTIES)).thenReturn(true); + when(mockCommandLine.getOptionValues(GravitinoOptions.PROPERTIES)) + .thenReturn(new String[] {"key1=val1", "key2" + "=val2"}); + when(mockCommandLine.hasOption(GravitinoOptions.COMMENT)).thenReturn(false); + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.MODEL, CommandActions.CREATE)); + + doReturn(mockCreate) + .when(commandLine) + .newCreateModel( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + eq("catalog"), + eq("schema"), + eq("model"), + isNull(), + argThat( + argument -> + argument.size() == 2 + && argument.containsKey("key1") + && argument.get("key1").equals("val1"))); + doReturn(mockCreate).when(mockCreate).validate(); + commandLine.handleCommandLine(); + verify(mockCreate).handle(); + } + + @Test + void testRegisterModelCommandWithCommentAndProperties() { + RegisterModel mockCreate = mock(RegisterModel.class); + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + 
when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.NAME)).thenReturn("catalog.schema.model"); + when(mockCommandLine.hasOption(GravitinoOptions.PROPERTIES)).thenReturn(true); + when(mockCommandLine.getOptionValues(GravitinoOptions.PROPERTIES)) + .thenReturn(new String[] {"key1=val1", "key2" + "=val2"}); + when(mockCommandLine.hasOption(GravitinoOptions.COMMENT)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.COMMENT)).thenReturn("comment"); + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.MODEL, CommandActions.CREATE)); + + doReturn(mockCreate) + .when(commandLine) + .newCreateModel( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + eq("catalog"), + eq("schema"), + eq("model"), + eq("comment"), + argThat( + argument -> + argument.size() == 2 + && argument.containsKey("key1") + && argument.get("key1").equals("val1"))); + doReturn(mockCreate).when(mockCreate).validate(); + commandLine.handleCommandLine(); + verify(mockCreate).handle(); + } + + @Test + void testDeleteModelCommand() { + DeleteModel mockDelete = mock(DeleteModel.class); + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.NAME)).thenReturn("catalog.schema.model"); + + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.MODEL, CommandActions.DELETE)); + doReturn(mockDelete) + .when(commandLine) + .newDeleteModel( + GravitinoCommandLine.DEFAULT_URL, + false, + false, + "metalake_demo", + "catalog", + "schema", + "model"); + doReturn(mockDelete).when(mockDelete).validate(); + 
commandLine.handleCommandLine(); + verify(mockDelete).handle(); + } + + @Test + void testLinkModelCommandWithoutAlias() { + LinkModel linkModelMock = mock(LinkModel.class); + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.NAME)).thenReturn("catalog.schema.model"); + when(mockCommandLine.hasOption(GravitinoOptions.URI)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.URI)).thenReturn("file:///tmp/file"); + when(mockCommandLine.hasOption(GravitinoOptions.ALIAS)).thenReturn(false); + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.MODEL, CommandActions.UPDATE)); + + doReturn(linkModelMock) + .when(commandLine) + .newLinkModel( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + eq("catalog"), + eq("schema"), + eq("model"), + eq("file:///tmp/file"), + isNull(), + isNull(), + argThat(Map::isEmpty)); + doReturn(linkModelMock).when(linkModelMock).validate(); + commandLine.handleCommandLine(); + verify(linkModelMock).handle(); + } + + @Test + void testLinkModelCommandWithAlias() { + LinkModel linkModelMock = mock(LinkModel.class); + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.NAME)).thenReturn("catalog.schema.model"); + when(mockCommandLine.hasOption(GravitinoOptions.URI)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.URI)).thenReturn("file:///tmp/file"); + 
when(mockCommandLine.hasOption(GravitinoOptions.ALIAS)).thenReturn(true); + when(mockCommandLine.getOptionValues(GravitinoOptions.ALIAS)) + .thenReturn(new String[] {"aliasA", "aliasB"}); + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.MODEL, CommandActions.UPDATE)); + + doReturn(linkModelMock) + .when(commandLine) + .newLinkModel( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + eq("catalog"), + eq("schema"), + eq("model"), + eq("file:///tmp/file"), + argThat( + argument -> + argument.length == 2 + && "aliasA".equals(argument[0]) + && "aliasB".equals(argument[1])), + isNull(), + argThat(Map::isEmpty)); + doReturn(linkModelMock).when(linkModelMock).validate(); + commandLine.handleCommandLine(); + verify(linkModelMock).handle(); + } + + @Test + void testLinkModelCommandWithoutURI() { + Main.useExit = false; + + LinkModel spyLinkModel = + spy( + new LinkModel( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "model", + null, + new String[] {"aliasA", "aliasB"}, + "comment", + Collections.EMPTY_MAP)); + + assertThrows(RuntimeException.class, spyLinkModel::validate); + verify(spyLinkModel, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_URI, output); + } + + @Test + void testLinkModelCommandWithAllComponent() { + LinkModel linkModelMock = mock(LinkModel.class); + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.NAME)).thenReturn("catalog.schema.model"); + when(mockCommandLine.hasOption(GravitinoOptions.URI)).thenReturn(true); + 
when(mockCommandLine.getOptionValue(GravitinoOptions.URI)).thenReturn("file:///tmp/file"); + when(mockCommandLine.hasOption(GravitinoOptions.ALIAS)).thenReturn(true); + when(mockCommandLine.getOptionValues(GravitinoOptions.ALIAS)) + .thenReturn(new String[] {"aliasA", "aliasB"}); + when(mockCommandLine.hasOption(GravitinoOptions.COMMENT)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.COMMENT)).thenReturn("comment"); + when(mockCommandLine.hasOption(GravitinoOptions.PROPERTIES)).thenReturn(true); + when(mockCommandLine.getOptionValues(GravitinoOptions.PROPERTIES)) + .thenReturn(new String[] {"key1=val1", "key2" + "=val2"}); + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.MODEL, CommandActions.UPDATE)); + + doReturn(linkModelMock) + .when(commandLine) + .newLinkModel( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + eq("catalog"), + eq("schema"), + eq("model"), + eq("file:///tmp/file"), + argThat( + argument -> + argument.length == 2 + && "aliasA".equals(argument[0]) + && "aliasB".equals(argument[1])), + eq("comment"), + argThat( + argument -> + argument.size() == 2 + && argument.containsKey("key1") + && argument.containsKey("key2") + && "val1".equals(argument.get("key1")) + && "val2".equals(argument.get("key2")))); + doReturn(linkModelMock).when(linkModelMock).validate(); + commandLine.handleCommandLine(); + verify(linkModelMock).handle(); + } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestParseType.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestParseType.java index c53d3c2bdcd..6c9132dbf4b 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestParseType.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestParseType.java @@ -19,49 +19,85 @@ package org.apache.gravitino.cli; +import static org.hamcrest.CoreMatchers.instanceOf; +import static 
org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import org.apache.gravitino.rel.types.Type; +import org.apache.gravitino.rel.types.Types; import org.junit.jupiter.api.Test; public class TestParseType { @Test - public void testParseVarcharWithLength() { - ParsedType parsed = ParseType.parse("varchar(10)"); - assertNotNull(parsed); - assertEquals("varchar", parsed.getTypeName()); - assertEquals(10, parsed.getLength()); - assertNull(parsed.getScale()); - assertNull(parsed.getPrecision()); + public void testParseTypeVarcharWithLength() { + Type type = ParseType.toType("varchar(10)"); + assertThat(type, instanceOf(Types.VarCharType.class)); + assertEquals(10, ((Types.VarCharType) type).length()); } @Test - public void testParseDecimalWithPrecisionAndScale() { - ParsedType parsed = ParseType.parse("decimal(10,5)"); - assertNotNull(parsed); - assertEquals("decimal", parsed.getTypeName()); - assertEquals(10, parsed.getPrecision()); - assertEquals(5, parsed.getScale()); - assertNull(parsed.getLength()); + public void testParseTypeDecimalWithPrecisionAndScale() { + Type type = ParseType.toType("decimal(10,5)"); + assertThat(type, instanceOf(Types.DecimalType.class)); + assertEquals(10, ((Types.DecimalType) type).precision()); + assertEquals(5, ((Types.DecimalType) type).scale()); } @Test - public void testParseIntegerWithoutParameters() { - ParsedType parsed = ParseType.parse("int()"); - assertNull(parsed); // Expect null because the format is unsupported + public void testParseTypeListValidInput() { + Type type = ParseType.toType("list(integer)"); + assertThat(type, instanceOf(Types.ListType.class)); + Type elementType = ((Types.ListType) type).elementType(); + assertThat(elementType, instanceOf(Types.IntegerType.class)); } @Test - public 
void testParseOrdinaryInput() { - assertNull(ParseType.parse("string")); - assertNull(ParseType.parse("int")); + public void testParseTypeListMalformedInput() { + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("list()")); + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("list(10)")); + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("list(unknown)")); + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("list(integer,integer)")); + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("list(integer")); } @Test - public void testParseMalformedInput() { - assertNull(ParseType.parse("varchar(-10)")); - assertNull(ParseType.parse("decimal(10,abc)")); + public void testParseTypeMapValidInput() { + Type type = ParseType.toType("map(string,integer)"); + assertThat(type, instanceOf(Types.MapType.class)); + Type keyType = ((Types.MapType) type).keyType(); + Type valueType = ((Types.MapType) type).valueType(); + assertThat(keyType, instanceOf(Types.StringType.class)); + assertThat(valueType, instanceOf(Types.IntegerType.class)); + } + + @Test + public void testParseTypeMapMalformedInput() { + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("map()")); + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("map(10,10)")); + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("map(unknown,unknown)")); + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("map(string)")); + assertThrows( + IllegalArgumentException.class, () -> ParseType.toType("map(string,integer,integer)")); + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("map(string,integer")); + } + + @Test + public void testParseTypeIntegerWithoutParameters() { + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("int()")); + } + + @Test + public void testParseTypeOrdinaryInput() { + 
assertNull(ParseType.parseBasicType("string")); + assertNull(ParseType.parseBasicType("int")); + } + + @Test + public void testParseTypeMalformedInput() { + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("varchar(-10)")); + assertThrows(IllegalArgumentException.class, () -> ParseType.toType("decimal(10,abc)")); } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestRoleCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestRoleCommands.java index 0e671067e3f..529979582ff 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestRoleCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestRoleCommands.java @@ -80,6 +80,7 @@ void testListRolesCommand() { doReturn(mockList) .when(commandLine) .newListRoles(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo"); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -98,12 +99,14 @@ void testRoleDetailsCommand() { doReturn(mockDetails) .when(commandLine) .newRoleDetails(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "admin"); + doReturn(mockDetails).when(mockDetails).validate(); commandLine.handleCommandLine(); verify(mockDetails).handle(); } @Test void testRoleDetailsCommandWithMultipleRoles() { + Main.useExit = false; when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); when(mockCommandLine.hasOption(GravitinoOptions.ROLE)).thenReturn(true); @@ -114,7 +117,7 @@ void testRoleDetailsCommandWithMultipleRoles() { new GravitinoCommandLine( mockCommandLine, mockOptions, CommandEntities.ROLE, CommandActions.DETAILS)); - assertThrows(IllegalArgumentException.class, commandLine::handleCommandLine); + assertThrows(RuntimeException.class, commandLine::handleCommandLine); verify(commandLine, never()) .newRoleDetails( eq(GravitinoCommandLine.DEFAULT_URL), 
eq(false), eq("metalake_demo"), any()); @@ -135,6 +138,7 @@ void testRoleAuditCommand() { doReturn(mockAudit) .when(commandLine) .newRoleAudit(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "group"); + doReturn(mockAudit).when(mockAudit).validate(); commandLine.handleCommandLine(); verify(mockAudit).handle(); } @@ -154,6 +158,7 @@ void testCreateRoleCommand() { .when(commandLine) .newCreateRole( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", new String[] {"admin"}); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -178,6 +183,7 @@ void testCreateRolesCommand() { eq(false), eq("metalake_demo"), eq(new String[] {"admin", "engineer", "scientist"})); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -201,6 +207,7 @@ void testDeleteRoleCommand() { false, "metalake_demo", new String[] {"admin"}); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -227,6 +234,7 @@ void testDeleteRolesCommand() { eq(false), eq("metalake_demo"), eq(new String[] {"admin", "engineer", "scientist"})); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -247,6 +255,7 @@ void testDeleteRoleForceCommand() { .when(commandLine) .newDeleteRole( GravitinoCommandLine.DEFAULT_URL, false, true, "metalake_demo", new String[] {"admin"}); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -276,10 +285,24 @@ void testGrantPrivilegesToRole() { eq("admin"), any(), eq(privileges)); + doReturn(mockGrant).when(mockGrant).validate(); commandLine.handleCommandLine(); verify(mockGrant).handle(); } + @Test + void testGrantPrivilegesToRoleWithoutPrivileges() { + Main.useExit = false; + GrantPrivilegesToRole spyGrantRole = + spy( + new GrantPrivilegesToRole( + 
GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "admin", null, null)); + assertThrows(RuntimeException.class, spyGrantRole::validate); + verify(spyGrantRole, never()).handle(); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PRIVILEGES, errOutput); + } + @Test void testRevokePrivilegesFromRole() { RevokePrivilegesFromRole mockRevoke = mock(RevokePrivilegesFromRole.class); @@ -305,10 +328,24 @@ void testRevokePrivilegesFromRole() { eq("admin"), any(), eq(privileges)); + doReturn(mockRevoke).when(mockRevoke).validate(); commandLine.handleCommandLine(); verify(mockRevoke).handle(); } + @Test + void testRevokePrivilegesFromRoleWithoutPrivileges() { + Main.useExit = false; + RevokePrivilegesFromRole spyGrantRole = + spy( + new RevokePrivilegesFromRole( + GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "admin", null, null)); + assertThrows(RuntimeException.class, spyGrantRole::validate); + verify(spyGrantRole, never()).handle(); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PRIVILEGES, errOutput); + } + @Test void testDeleteRoleCommandWithoutRole() { Main.useExit = false; diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestSchemaCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestSchemaCommands.java index 190e866355b..6b8770d8edf 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestSchemaCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestSchemaCommands.java @@ -19,6 +19,7 @@ package org.apache.gravitino.cli; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.doReturn; @@ -30,6 +31,7 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import 
org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Options; import org.apache.gravitino.cli.commands.CreateSchema; @@ -86,6 +88,7 @@ void testListSchemasCommand() { doReturn(mockList) .when(commandLine) .newListSchema(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog"); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -106,6 +109,7 @@ void testSchemaDetailsCommand() { .when(commandLine) .newSchemaDetails( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema"); + doReturn(mockDetails).when(mockDetails).validate(); commandLine.handleCommandLine(); verify(mockDetails).handle(); } @@ -126,6 +130,7 @@ void testSchemaAuditCommand() { .when(commandLine) .newSchemaAudit( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema"); + doReturn(mockAudit).when(mockAudit).validate(); commandLine.handleCommandLine(); verify(mockAudit).handle(); } @@ -153,6 +158,7 @@ void testCreateSchemaCommand() { "catalog", "schema", "comment"); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -172,6 +178,7 @@ void testDeleteSchemaCommand() { .when(commandLine) .newDeleteSchema( GravitinoCommandLine.DEFAULT_URL, false, false, "metalake_demo", "catalog", "schema"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -192,6 +199,7 @@ void testDeleteSchemaForceCommand() { .when(commandLine) .newDeleteSchema( GravitinoCommandLine.DEFAULT_URL, false, true, "metalake_demo", "catalog", "schema"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -221,10 +229,71 @@ void testSetSchemaPropertyCommand() { "schema", "property", "value"); + doReturn(mockSetProperty).when(mockSetProperty).validate(); commandLine.handleCommandLine(); verify(mockSetProperty).handle(); } 
+ @Test + void testSetSchemaPropertyCommandWithoutPropertyAndValue() { + Main.useExit = false; + SetSchemaProperty spySetProperty = + spy( + new SetSchemaProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + null, + null)); + + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY_AND_VALUE, output); + } + + @Test + void testSetSchemaPropertyCommandWithoutProperty() { + Main.useExit = false; + SetSchemaProperty spySetProperty = + spy( + new SetSchemaProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + null, + "value")); + + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, output); + } + + @Test + void testSetSchemaPropertyCommandWithoutValue() { + Main.useExit = false; + SetSchemaProperty spySetProperty = + spy( + new SetSchemaProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "property", + null)); + + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_VALUE, output); + } + @Test void testRemoveSchemaPropertyCommand() { RemoveSchemaProperty mockRemoveProperty = mock(RemoveSchemaProperty.class); @@ -247,10 +316,29 @@ void testRemoveSchemaPropertyCommand() { "catalog", "schema", "property"); + doReturn(mockRemoveProperty).when(mockRemoveProperty).validate(); commandLine.handleCommandLine(); verify(mockRemoveProperty).handle(); } + @Test + void 
testRemoveSchemaPropertyCommandWithoutProperty() { + Main.useExit = false; + RemoveSchemaProperty mockRemoveProperty = + spy( + new RemoveSchemaProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "demo_metalake", + "catalog", + "schema", + null)); + + assertThrows(RuntimeException.class, mockRemoveProperty::validate); + String errOutput = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, errOutput); + } + @Test void testListSchemaPropertiesCommand() { ListSchemaProperties mockListProperties = mock(ListSchemaProperties.class); @@ -266,6 +354,7 @@ void testListSchemaPropertiesCommand() { .when(commandLine) .newListSchemaProperties( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema"); + doReturn(mockListProperties).when(mockListProperties).validate(); commandLine.handleCommandLine(); verify(mockListProperties).handle(); } @@ -287,7 +376,7 @@ void testListSchemaWithoutCatalog() { verify(commandLine, never()) .newListSchema(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", null); assertTrue( - errContent.toString().contains("Missing required argument(s): " + CommandEntities.CATALOG)); + errContent.toString().contains(ErrorMessages.MISSING_ENTITIES + CommandEntities.CATALOG)); } @Test @@ -308,7 +397,7 @@ void testDetailsSchemaWithoutCatalog() { errContent .toString() .contains( - "Missing required argument(s): " + ErrorMessages.MISSING_ENTITIES + CommandEntities.CATALOG + ", " + CommandEntities.SCHEMA)); @@ -330,6 +419,6 @@ void testDetailsSchemaWithoutSchema() { assertThrows(RuntimeException.class, commandLine::handleCommandLine); assertTrue( - errContent.toString().contains("Missing required argument(s): " + CommandEntities.SCHEMA)); + errContent.toString().contains(ErrorMessages.MISSING_ENTITIES + CommandEntities.SCHEMA)); } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestTableCommands.java 
b/clients/cli/src/test/java/org/apache/gravitino/cli/TestTableCommands.java index c4a8223dd48..f0683320457 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestTableCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestTableCommands.java @@ -95,6 +95,7 @@ void testListTablesCommand() { .when(commandLine) .newListTables( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema"); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -115,6 +116,7 @@ void testTableDetailsCommand() { .when(commandLine) .newTableDetails( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", "users"); + doReturn(mockDetails).when(mockDetails).validate(); commandLine.handleCommandLine(); verify(mockDetails).handle(); } @@ -135,6 +137,7 @@ void testTableIndexCommand() { .when(commandLine) .newListIndexes( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", "users"); + doReturn(mockIndex).when(mockIndex).validate(); commandLine.handleCommandLine(); verify(mockIndex).handle(); } @@ -155,6 +158,7 @@ void testTablePartitionCommand() { .when(commandLine) .newTablePartition( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", "users"); + doReturn(mockPartition).when(mockPartition).validate(); commandLine.handleCommandLine(); verify(mockPartition).handle(); } @@ -175,6 +179,7 @@ void testTableDistributionCommand() { .when(commandLine) .newTableDistribution( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", "users"); + doReturn(mockDistribution).when(mockDistribution).validate(); commandLine.handleCommandLine(); verify(mockDistribution).handle(); } @@ -197,7 +202,7 @@ void testTableSortOrderCommand() { .when(commandLine) .newTableSortOrder( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", "users"); - + 
doReturn(mockSortOrder).when(mockSortOrder).validate(); commandLine.handleCommandLine(); verify(mockSortOrder).handle(); } @@ -218,6 +223,7 @@ void testTableAuditCommand() { .when(commandLine) .newTableAudit( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", "users"); + doReturn(mockAudit).when(mockAudit).validate(); commandLine.handleCommandLine(); verify(mockAudit).handle(); } @@ -243,6 +249,7 @@ void testDeleteTableCommand() { "catalog", "schema", "users"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -269,6 +276,7 @@ void testDeleteTableForceCommand() { "catalog", "schema", "users"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -289,12 +297,13 @@ void testListTablePropertiesCommand() { .when(commandLine) .newListTableProperties( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", "users"); + doReturn(mockListProperties).when(mockListProperties).validate(); commandLine.handleCommandLine(); verify(mockListProperties).handle(); } @Test - void testSetFilesetPropertyCommand() { + void testSetTablePropertyCommand() { SetTableProperty mockSetProperties = mock(SetTableProperty.class); when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); @@ -320,10 +329,74 @@ void testSetFilesetPropertyCommand() { "user", "property", "value"); + doReturn(mockSetProperties).when(mockSetProperties).validate(); commandLine.handleCommandLine(); verify(mockSetProperties).handle(); } + @Test + void testSetTablePropertyCommandWithoutPropertyAndValue() { + Main.useExit = false; + SetTableProperty spySetProperty = + spy( + new SetTableProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "table", + null, + null)); + + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + 
String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY_AND_VALUE, output); + } + + @Test + void testSetTablePropertyCommandWithoutProperty() { + Main.useExit = false; + SetTableProperty spySetProperty = + spy( + new SetTableProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "table", + null, + "value")); + + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, output); + } + + @Test + void testSetTablePropertyCommandWithoutValue() { + Main.useExit = false; + SetTableProperty spySetProperty = + spy( + new SetTableProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "table", + "property", + null)); + + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_VALUE, output); + } + @Test void testRemoveTablePropertyCommand() { RemoveTableProperty mockSetProperties = mock(RemoveTableProperty.class); @@ -348,10 +421,31 @@ void testRemoveTablePropertyCommand() { "schema", "users", "property"); + doReturn(mockSetProperties).when(mockSetProperties).validate(); commandLine.handleCommandLine(); verify(mockSetProperties).handle(); } + @Test + void testRemoveTablePropertyCommandWithoutProperty() { + Main.useExit = false; + RemoveTableProperty spyRemoveProperty = + spy( + new RemoveTableProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "table", + null)); + + assertThrows(RuntimeException.class, spyRemoveProperty::validate); + verify(spyRemoveProperty, never()).handle(); + String output = new 
String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, output); + } + @Test void testUpdateTableCommentsCommand() { UpdateTableComment mockUpdate = mock(UpdateTableComment.class); @@ -375,6 +469,7 @@ void testUpdateTableCommentsCommand() { "schema", "users", "New comment"); + doReturn(mockUpdate).when(mockUpdate).validate(); commandLine.handleCommandLine(); verify(mockUpdate).handle(); } @@ -402,6 +497,7 @@ void testupdateTableNmeCommand() { "schema", "users", "people"); + doReturn(mockUpdate).when(mockUpdate).validate(); commandLine.handleCommandLine(); verify(mockUpdate).handle(); } @@ -432,10 +528,32 @@ void testCreateTable() { "users", "users.csv", "comment"); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } + @Test + void testCreateTableWithoutFile() { + Main.useExit = false; + CreateTable spyCreate = + spy( + new CreateTable( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "table", + null, + "comment")); + + assertThrows(RuntimeException.class, spyCreate::validate); + verify(spyCreate, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_COLUMN_FILE, output); + } + @Test @SuppressWarnings("DefaultCharset") void testListTableWithoutCatalog() { @@ -457,7 +575,7 @@ void testListTableWithoutCatalog() { output, ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + CommandEntities.CATALOG + ", " + CommandEntities.SCHEMA); @@ -485,7 +603,7 @@ void testListTableWithoutSchema() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + CommandEntities.SCHEMA); } @@ -510,7 +628,7 @@ void testDetailTableWithoutCatalog() { output, ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + 
ErrorMessages.MISSING_ENTITIES + CommandEntities.CATALOG + ", " + CommandEntities.SCHEMA @@ -539,7 +657,7 @@ void testDetailTableWithoutSchema() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + CommandEntities.SCHEMA + ", " + CommandEntities.TABLE); @@ -568,7 +686,7 @@ void testDetailTableWithoutTable() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + CommandEntities.TABLE); } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestTagCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestTagCommands.java index 74932ca87b3..a94ccee7daa 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestTagCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestTagCommands.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; import static org.mockito.ArgumentMatchers.argThat; -import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.eq; @@ -95,6 +94,7 @@ void testListTagsCommand() { doReturn(mockList) .when(commandLine) .newListTags(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo"); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -113,10 +113,30 @@ void testTagDetailsCommand() { doReturn(mockDetails) .when(commandLine) .newTagDetails(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "tagA"); + doReturn(mockDetails).when(mockDetails).validate(); commandLine.handleCommandLine(); verify(mockDetails).handle(); } + @Test + void testTagDetailsCommandWithMultipleTag() { + Main.useExit = false; + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + 
when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.getOptionValues(GravitinoOptions.TAG)) + .thenReturn(new String[] {"tagA", "tagB"}); + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.TAG, CommandActions.DETAILS)); + + assertThrows(RuntimeException.class, commandLine::handleCommandLine); + verify(commandLine, never()) + .newTagDetails(eq(GravitinoCommandLine.DEFAULT_URL), eq(false), eq("metalake_demo"), any()); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MULTIPLE_TAG_COMMAND_ERROR, output); + } + @Test void testCreateTagCommand() { CreateTag mockCreate = mock(CreateTag.class); @@ -138,6 +158,7 @@ void testCreateTagCommand() { "metalake_demo", new String[] {"tagA"}, "comment"); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -145,25 +166,15 @@ void testCreateTagCommand() { @Test void testCreateCommandWithoutTagOption() { Main.useExit = false; - when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); - when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); - when(mockCommandLine.hasOption(GravitinoOptions.TAG)).thenReturn(false); - - GravitinoCommandLine commandLine = + CreateTag spyCreate = spy( - new GravitinoCommandLine( - mockCommandLine, mockOptions, CommandEntities.TAG, CommandActions.CREATE)); + new CreateTag( + GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", null, "comment")); - assertThrows(RuntimeException.class, commandLine::handleCommandLine); - verify(commandLine, never()) - .newCreateTags( - eq(GravitinoCommandLine.DEFAULT_URL), - eq(false), - eq("metalake_demo"), - isNull(), - isNull()); + assertThrows(RuntimeException.class, spyCreate::validate); + verify(spyCreate, never()).handle(); String output = new 
String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); - assertEquals(output, ErrorMessages.MISSING_TAG); + assertEquals(ErrorMessages.MISSING_TAG, output); } @Test @@ -183,11 +194,16 @@ void testCreateTagsCommand() { doReturn(mockCreate) .when(commandLine) .newCreateTags( - GravitinoCommandLine.DEFAULT_URL, - false, - "metalake_demo", - new String[] {"tagA", "tagB"}, - "comment"); + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + argThat( + argument -> + argument.length == 2 + && argument[0].equals("tagA") + && argument[1].equals("tagB")), + eq("comment")); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -207,6 +223,7 @@ void testCreateTagCommandNoComment() { .when(commandLine) .newCreateTags( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", new String[] {"tagA"}, null); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -226,6 +243,7 @@ void testDeleteTagCommand() { .when(commandLine) .newDeleteTag( GravitinoCommandLine.DEFAULT_URL, false, false, "metalake_demo", new String[] {"tagA"}); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -250,6 +268,7 @@ void testDeleteTagsCommand() { false, "metalake_demo", new String[] {"tagA", "tagB"}); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -270,6 +289,7 @@ void testDeleteTagForceCommand() { .when(commandLine) .newDeleteTag( GravitinoCommandLine.DEFAULT_URL, false, true, "metalake_demo", new String[] {"tagA"}); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -293,68 +313,58 @@ void testSetTagPropertyCommand() { .when(commandLine) .newSetTagProperty( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "tagA", "property", 
"value"); + doReturn(mockSetProperty).when(mockSetProperty).validate(); commandLine.handleCommandLine(); verify(mockSetProperty).handle(); } @Test - void testSetTagPropertyCommandWithoutPropertyOption() { + void testSetTagPropertyCommandWithoutPropertyAndValue() { Main.useExit = false; - when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); - when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); - when(mockCommandLine.hasOption(GravitinoOptions.TAG)).thenReturn(true); - when(mockCommandLine.getOptionValues(GravitinoOptions.TAG)).thenReturn(new String[] {"tagA"}); - when(mockCommandLine.hasOption(GravitinoOptions.PROPERTY)).thenReturn(false); - when(mockCommandLine.hasOption(GravitinoOptions.VALUE)).thenReturn(true); - when(mockCommandLine.getOptionValue(GravitinoOptions.VALUE)).thenReturn("value"); - GravitinoCommandLine commandLine = + SetTagProperty spySetProperty = spy( - new GravitinoCommandLine( - mockCommandLine, mockOptions, CommandEntities.TAG, CommandActions.SET)); + new SetTagProperty( + GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "tagA", null, null)); + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(output, ErrorMessages.MISSING_PROPERTY_AND_VALUE); + } - assertThrows(RuntimeException.class, commandLine::handleCommandLine); - verify(commandLine, never()) - .newSetTagProperty( - eq(GravitinoCommandLine.DEFAULT_URL), - eq(false), - eq("metalake_demo"), - eq("tagA"), - isNull(), - eq("value")); + @Test + void testSetTagPropertyCommandWithoutPropertyOption() { + Main.useExit = false; + SetTagProperty spySetProperty = + spy( + new SetTagProperty( + GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "tagA", null, "value")); + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, 
never()).handle(); String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); - assertEquals(output, "The set command only supports tag properties or attaching tags."); + assertEquals(output, ErrorMessages.MISSING_PROPERTY); } @Test void testSetTagPropertyCommandWithoutValueOption() { Main.useExit = false; - when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); - when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); - when(mockCommandLine.hasOption(GravitinoOptions.TAG)).thenReturn(true); - when(mockCommandLine.getOptionValues(GravitinoOptions.TAG)).thenReturn(new String[] {"tagA"}); - when(mockCommandLine.hasOption(GravitinoOptions.PROPERTY)).thenReturn(true); - when(mockCommandLine.getOptionValue(GravitinoOptions.PROPERTY)).thenReturn("property"); - when(mockCommandLine.hasOption(GravitinoOptions.VALUE)).thenReturn(false); - GravitinoCommandLine commandLine = + SetTagProperty spySetProperty = spy( - new GravitinoCommandLine( - mockCommandLine, mockOptions, CommandEntities.TAG, CommandActions.SET)); - - assertThrows(RuntimeException.class, commandLine::handleCommandLine); - verify(commandLine, never()) - .newSetTagProperty( - eq(GravitinoCommandLine.DEFAULT_URL), - eq(false), - eq("metalake_demo"), - eq("tagA"), - eq("property"), - isNull()); + new SetTagProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "tagA", + "property", + null)); + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); - assertEquals(output, "The set command only supports tag properties or attaching tags."); + assertEquals(output, ErrorMessages.MISSING_VALUE); } @Test void testSetMultipleTagPropertyCommandError() { + Main.useExit = false; when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); 
when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); when(mockCommandLine.hasOption(GravitinoOptions.TAG)).thenReturn(true); @@ -368,10 +378,17 @@ void testSetMultipleTagPropertyCommandError() { spy( new GravitinoCommandLine( mockCommandLine, mockOptions, CommandEntities.TAG, CommandActions.SET)); - Assertions.assertThrows( - IllegalArgumentException.class, - () -> commandLine.handleCommandLine(), - "Error: The current command only supports one --tag option."); + Assertions.assertThrows(RuntimeException.class, commandLine::handleCommandLine); + verify(commandLine, never()) + .newSetTagProperty( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + any(), + eq("property"), + eq("value")); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MULTIPLE_TAG_COMMAND_ERROR, output); } @Test @@ -391,10 +408,38 @@ void testRemoveTagPropertyCommand() { .when(commandLine) .newRemoveTagProperty( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "tagA", "property"); + doReturn(mockRemoveProperty).when(mockRemoveProperty).validate(); commandLine.handleCommandLine(); verify(mockRemoveProperty).handle(); } + @Test + void testRemoveTagPropertyCommandWithMultipleTags() { + Main.useExit = false; + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.hasOption(GravitinoOptions.TAG)).thenReturn(true); + when(mockCommandLine.getOptionValues(GravitinoOptions.TAG)) + .thenReturn(new String[] {"tagA", "tagB"}); + when(mockCommandLine.hasOption(GravitinoOptions.PROPERTY)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.PROPERTY)).thenReturn("property"); + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.TAG, 
CommandActions.REMOVE)); + + assertThrows(RuntimeException.class, commandLine::handleCommandLine); + verify(commandLine, never()) + .newRemoveTagProperty( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + any(), + eq("property")); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MULTIPLE_TAG_COMMAND_ERROR, output); + } + @Test void testListTagPropertiesCommand() { ListTagProperties mockListProperties = mock(ListTagProperties.class); @@ -409,6 +454,7 @@ void testListTagPropertiesCommand() { doReturn(mockListProperties) .when(commandLine) .newListTagProperties(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "tagA"); + doReturn(mockListProperties).when(mockListProperties).validate(); commandLine.handleCommandLine(); verify(mockListProperties).handle(); } @@ -434,6 +480,7 @@ void testDeleteAllTagCommand() { eq("metalake_demo"), any(FullName.class), eq(true)); + doReturn(mockRemoveAllTags).when(mockRemoveAllTags).validate(); commandLine.handleCommandLine(); verify(mockRemoveAllTags).handle(); } @@ -455,10 +502,38 @@ void testUpdateTagCommentCommand() { .when(commandLine) .newUpdateTagComment( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "tagA", "new comment"); + doReturn(mockUpdateComment).when(mockUpdateComment).validate(); commandLine.handleCommandLine(); verify(mockUpdateComment).handle(); } + @Test + void testUpdateTagCommentCommandWithMultipleTags() { + Main.useExit = false; + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.hasOption(GravitinoOptions.COMMENT)).thenReturn(true); + when(mockCommandLine.hasOption(GravitinoOptions.TAG)).thenReturn(true); + when(mockCommandLine.getOptionValues(GravitinoOptions.TAG)) + .thenReturn(new String[] {"tagA", "tagB"}); + 
when(mockCommandLine.getOptionValue(GravitinoOptions.COMMENT)).thenReturn("new comment"); + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.TAG, CommandActions.UPDATE)); + + assertThrows(RuntimeException.class, commandLine::handleCommandLine); + verify(commandLine, never()) + .newUpdateTagComment( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + any(), + eq("new comment")); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MULTIPLE_TAG_COMMAND_ERROR, output); + } + @Test void testUpdateTagNameCommand() { UpdateTagName mockUpdateName = mock(UpdateTagName.class); @@ -475,10 +550,38 @@ void testUpdateTagNameCommand() { doReturn(mockUpdateName) .when(commandLine) .newUpdateTagName(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "tagA", "tagB"); + doReturn(mockUpdateName).when(mockUpdateName).validate(); commandLine.handleCommandLine(); verify(mockUpdateName).handle(); } + @Test + void testUpdateTagNameCommandWithMultipleTags() { + Main.useExit = false; + when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); + when(mockCommandLine.hasOption(GravitinoOptions.TAG)).thenReturn(true); + when(mockCommandLine.getOptionValues(GravitinoOptions.TAG)) + .thenReturn(new String[] {"tagA", "tagB"}); + when(mockCommandLine.hasOption(GravitinoOptions.RENAME)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.RENAME)).thenReturn("tagC"); + GravitinoCommandLine commandLine = + spy( + new GravitinoCommandLine( + mockCommandLine, mockOptions, CommandEntities.TAG, CommandActions.UPDATE)); + + assertThrows(RuntimeException.class, commandLine::handleCommandLine); + verify(commandLine, never()) + .newUpdateTagName( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + 
eq("metalake_demo"), + any(), + eq("tagC")); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MULTIPLE_TAG_COMMAND_ERROR, output); + } + @Test void testListEntityTagsCommand() { ListEntityTags mockListTags = mock(ListEntityTags.class); @@ -494,6 +597,7 @@ void testListEntityTagsCommand() { .when(commandLine) .newListEntityTags( eq(GravitinoCommandLine.DEFAULT_URL), eq(false), eq("metalake_demo"), any()); + doReturn(mockListTags).when(mockListTags).validate(); commandLine.handleCommandLine(); verify(mockListTags).handle(); } @@ -525,6 +629,7 @@ public boolean matches(String[] argument) { return argument != null && argument.length > 0 && "tagA".equals(argument[0]); } })); + doReturn(mockTagEntity).when(mockTagEntity).validate(); commandLine.handleCommandLine(); verify(mockTagEntity).handle(); } @@ -532,27 +637,19 @@ public boolean matches(String[] argument) { @Test void testTagEntityCommandWithoutName() { Main.useExit = false; - when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); - when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); - when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(false); - when(mockCommandLine.hasOption(GravitinoOptions.TAG)).thenReturn(true); - when(mockCommandLine.getOptionValues(GravitinoOptions.TAG)).thenReturn(new String[] {"tagA"}); - GravitinoCommandLine commandLine = + TagEntity spyTagEntity = spy( - new GravitinoCommandLine( - mockCommandLine, mockOptions, CommandEntities.TAG, CommandActions.SET)); - - assertThrows(RuntimeException.class, commandLine::handleCommandLine); - verify(commandLine, never()) - .newTagEntity( - eq(GravitinoCommandLine.DEFAULT_URL), - eq(false), - eq("metalake_demo"), - isNull(), - argThat( - argument -> argument != null && argument.length > 0 && "tagA".equals(argument[0]))); + new TagEntity( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + null, + 
new String[] {"tagA"})); + + assertThrows(RuntimeException.class, spyTagEntity::validate); + verify(spyTagEntity, never()).handle(); String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); - assertEquals(output, ErrorMessages.MISSING_NAME); + assertEquals(ErrorMessages.MISSING_NAME, output); } @Test @@ -586,6 +683,7 @@ public boolean matches(String[] argument) { && "tagB".equals(argument[1]); } })); + doReturn(mockTagEntity).when(mockTagEntity).validate(); commandLine.handleCommandLine(); verify(mockTagEntity).handle(); } @@ -620,6 +718,7 @@ public boolean matches(String[] argument) { return argument != null && argument.length > 0 && "tagA".equals(argument[0]); } })); + doReturn(mockUntagEntity).when(mockUntagEntity).validate(); commandLine.handleCommandLine(); verify(mockUntagEntity).handle(); } @@ -627,32 +726,19 @@ public boolean matches(String[] argument) { @Test void testUntagEntityCommandWithoutName() { Main.useExit = false; - when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); - when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); - when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(false); - when(mockCommandLine.hasOption(GravitinoOptions.TAG)).thenReturn(true); - when(mockCommandLine.getOptionValues(GravitinoOptions.TAG)) - .thenReturn(new String[] {"tagA", "tagB"}); - GravitinoCommandLine commandLine = + UntagEntity spyUntagEntity = spy( - new GravitinoCommandLine( - mockCommandLine, mockOptions, CommandEntities.TAG, CommandActions.REMOVE)); - - assertThrows(RuntimeException.class, commandLine::handleCommandLine); - verify(commandLine, never()) - .newUntagEntity( - eq(GravitinoCommandLine.DEFAULT_URL), - eq(false), - eq("metalake_demo"), - isNull(), - argThat( - argument -> - argument != null - && argument.length > 0 - && "tagA".equals(argument[0]) - && "tagB".equals(argument[1]))); + new UntagEntity( + GravitinoCommandLine.DEFAULT_URL, + false, + 
"metalake_demo", + null, + new String[] {"tagA"})); + + assertThrows(RuntimeException.class, spyUntagEntity::validate); + verify(spyUntagEntity, never()).handle(); String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); - assertEquals(output, ErrorMessages.MISSING_NAME); + assertEquals(ErrorMessages.MISSING_NAME, output); } @Test @@ -688,6 +774,7 @@ public boolean matches(String[] argument) { && "tagB".equals(argument[1]); } })); + doReturn(mockUntagEntity).when(mockUntagEntity).validate(); commandLine.handleCommandLine(); verify(mockUntagEntity).handle(); } @@ -695,18 +782,59 @@ public boolean matches(String[] argument) { @Test void testDeleteTagCommandWithoutTagOption() { Main.useExit = false; + DeleteTag spyDeleteTag = + spy(new DeleteTag(GravitinoCommandLine.DEFAULT_URL, false, false, "metalake", null)); + + assertThrows(RuntimeException.class, spyDeleteTag::validate); + verify(spyDeleteTag, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_TAG, output); + } + + @Test + void testRemoveAllTagsCommand() { + Main.useExit = false; + RemoveAllTags mockRemoveAllTags = mock(RemoveAllTags.class); when(mockCommandLine.hasOption(GravitinoOptions.METALAKE)).thenReturn(true); when(mockCommandLine.getOptionValue(GravitinoOptions.METALAKE)).thenReturn("metalake_demo"); when(mockCommandLine.hasOption(GravitinoOptions.TAG)).thenReturn(false); + when(mockCommandLine.hasOption(GravitinoOptions.NAME)).thenReturn(true); + when(mockCommandLine.getOptionValue(GravitinoOptions.NAME)).thenReturn("catalog.schema.table"); + when(mockCommandLine.hasOption(GravitinoOptions.FORCE)).thenReturn(true); GravitinoCommandLine commandLine = spy( new GravitinoCommandLine( mockCommandLine, mockOptions, CommandEntities.TAG, CommandActions.REMOVE)); - assertThrows(RuntimeException.class, commandLine::handleCommandLine); - verify(commandLine, never()) - 
.newDeleteTag(GravitinoCommandLine.DEFAULT_URL, false, false, "metalake", null); + doReturn(mockRemoveAllTags) + .when(commandLine) + .newRemoveAllTags( + eq(GravitinoCommandLine.DEFAULT_URL), + eq(false), + eq("metalake_demo"), + argThat( + argument -> + argument != null + && "catalog".equals(argument.getCatalogName()) + && "schema".equals(argument.getSchemaName()) + && "table".equals(argument.getTableName())), + eq(true)); + doReturn(mockRemoveAllTags).when(mockRemoveAllTags).validate(); + commandLine.handleCommandLine(); + verify(mockRemoveAllTags).handle(); + } + + @Test + void testRemoveAllTagsCommandWithoutName() { + Main.useExit = false; + RemoveAllTags spyRemoveAllTags = + spy( + new RemoveAllTags( + GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", null, false)); + + assertThrows(RuntimeException.class, spyRemoveAllTags::validate); + verify(spyRemoveAllTags, never()).handle(); String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); - assertEquals(output, ErrorMessages.MISSING_TAG); + assertEquals(ErrorMessages.MISSING_NAME, output); } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestTopicCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestTopicCommands.java index 7fa2e453f32..31904b88563 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestTopicCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestTopicCommands.java @@ -89,6 +89,7 @@ void testListTopicsCommand() { .when(commandLine) .newListTopics( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema"); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -108,6 +109,7 @@ void testTopicDetailsCommand() { .when(commandLine) .newTopicDetails( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", "topic"); + doReturn(mockDetails).when(mockDetails).validate(); commandLine.handleCommandLine(); 
verify(mockDetails).handle(); } @@ -136,6 +138,7 @@ void testCreateTopicCommand() { "schema", "topic", "comment"); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -161,6 +164,7 @@ void testDeleteTopicCommand() { "catalog", "schema", "topic"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -187,6 +191,7 @@ void testDeleteTopicForceCommand() { "catalog", "schema", "topic"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -215,6 +220,7 @@ void testUpdateCommentTopicCommand() { "schema", "topic", "new comment"); + doReturn(mockUpdate).when(mockUpdate).validate(); commandLine.handleCommandLine(); verify(mockUpdate).handle(); } @@ -235,6 +241,7 @@ void testListTopicPropertiesCommand() { .when(commandLine) .newListTopicProperties( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "catalog", "schema", "topic"); + doReturn(mockListProperties).when(mockListProperties).validate(); commandLine.handleCommandLine(); verify(mockListProperties).handle(); } @@ -266,10 +273,71 @@ void testSetTopicPropertyCommand() { "topic", "property", "value"); + doReturn(mockSetProperties).when(mockSetProperties).validate(); commandLine.handleCommandLine(); verify(mockSetProperties).handle(); } + @Test + void testSetTopicPropertyCommandWithoutPropertyAndValue() { + Main.useExit = false; + SetTopicProperty spySetProperty = + spy( + new SetTopicProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "topic", + null, + null)); + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY_AND_VALUE, output); + } + + @Test + void 
testSetTopicPropertyCommandWithoutProperty() { + Main.useExit = false; + SetTopicProperty spySetProperty = + spy( + new SetTopicProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "topic", + null, + "value")); + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, output); + } + + @Test + void testSetTopicPropertyCommandWithoutValue() { + Main.useExit = false; + SetTopicProperty spySetProperty = + spy( + new SetTopicProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "topic", + "property", + null)); + assertThrows(RuntimeException.class, spySetProperty::validate); + verify(spySetProperty, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_VALUE, output); + } + @Test void testRemoveTopicPropertyCommand() { RemoveTopicProperty mockSetProperties = mock(RemoveTopicProperty.class); @@ -294,10 +362,31 @@ void testRemoveTopicPropertyCommand() { "schema", "topic", "property"); + doReturn(mockSetProperties).when(mockSetProperties).validate(); commandLine.handleCommandLine(); verify(mockSetProperties).handle(); } + @Test + void testRemoveTopicPropertyCommandWithoutProperty() { + Main.useExit = false; + RemoveTopicProperty spyRemoveProperty = + spy( + new RemoveTopicProperty( + GravitinoCommandLine.DEFAULT_URL, + false, + "metalake_demo", + "catalog", + "schema", + "topic", + null)); + + assertThrows(RuntimeException.class, spyRemoveProperty::validate); + verify(spyRemoveProperty, never()).handle(); + String output = new String(errContent.toByteArray(), StandardCharsets.UTF_8).trim(); + assertEquals(ErrorMessages.MISSING_PROPERTY, output); + } + @Test @SuppressWarnings("DefaultCharset") void 
testListTopicCommandWithoutCatalog() { @@ -317,7 +406,7 @@ void testListTopicCommandWithoutCatalog() { output, ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.CATALOG, CommandEntities.SCHEMA))); } @@ -342,7 +431,7 @@ void testListTopicCommandWithoutSchema() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.SCHEMA))); } @@ -367,7 +456,7 @@ void testTopicDetailsCommandWithoutCatalog() { output, ErrorMessages.MISSING_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ") .join( Arrays.asList( @@ -396,7 +485,7 @@ void testTopicDetailsCommandWithoutSchema() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.SCHEMA, CommandEntities.TOPIC))); } @@ -422,7 +511,7 @@ void testTopicDetailsCommandWithoutTopic() { output, ErrorMessages.MALFORMED_NAME + "\n" - + "Missing required argument(s): " + + ErrorMessages.MISSING_ENTITIES + Joiner.on(", ").join(Arrays.asList(CommandEntities.TOPIC))); } } diff --git a/clients/cli/src/test/java/org/apache/gravitino/cli/TestUserCommands.java b/clients/cli/src/test/java/org/apache/gravitino/cli/TestUserCommands.java index e8630ce9755..c7612f6c870 100644 --- a/clients/cli/src/test/java/org/apache/gravitino/cli/TestUserCommands.java +++ b/clients/cli/src/test/java/org/apache/gravitino/cli/TestUserCommands.java @@ -83,6 +83,7 @@ void testListUsersCommand() { doReturn(mockList) .when(commandLine) .newListUsers(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo"); + doReturn(mockList).when(mockList).validate(); commandLine.handleCommandLine(); verify(mockList).handle(); } @@ -101,6 +102,7 @@ void testUserDetailsCommand() { doReturn(mockDetails) 
.when(commandLine) .newUserDetails(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "user"); + doReturn(mockDetails).when(mockDetails).validate(); commandLine.handleCommandLine(); verify(mockDetails).handle(); } @@ -120,6 +122,7 @@ void testUserAuditCommand() { doReturn(mockAudit) .when(commandLine) .newUserAudit(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "admin"); + doReturn(mockAudit).when(mockAudit).validate(); commandLine.handleCommandLine(); verify(mockAudit).handle(); } @@ -138,6 +141,7 @@ void testCreateUserCommand() { doReturn(mockCreate) .when(commandLine) .newCreateUser(GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "user"); + doReturn(mockCreate).when(mockCreate).validate(); commandLine.handleCommandLine(); verify(mockCreate).handle(); } @@ -156,6 +160,7 @@ void testDeleteUserCommand() { doReturn(mockDelete) .when(commandLine) .newDeleteUser(GravitinoCommandLine.DEFAULT_URL, false, false, "metalake_demo", "user"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -175,6 +180,7 @@ void testDeleteUserForceCommand() { doReturn(mockDelete) .when(commandLine) .newDeleteUser(GravitinoCommandLine.DEFAULT_URL, false, true, "metalake_demo", "user"); + doReturn(mockDelete).when(mockDelete).validate(); commandLine.handleCommandLine(); verify(mockDelete).handle(); } @@ -247,6 +253,8 @@ void testRemoveRolesFromUserCommand() { .newRemoveRoleFromUser( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "user", "role1"); + doReturn(mockRemoveFirstRole).when(mockRemoveFirstRole).validate(); + doReturn(mockRemoveSecondRole).when(mockRemoveSecondRole).validate(); commandLine.handleCommandLine(); verify(mockRemoveSecondRole).handle(); @@ -281,6 +289,8 @@ void testAddRolesToUserCommand() { .newAddRoleToUser( GravitinoCommandLine.DEFAULT_URL, false, "metalake_demo", "user", "role1"); + doReturn(mockAddFirstRole).when(mockAddFirstRole).validate(); + 
doReturn(mockAddSecondRole).when(mockAddSecondRole).validate(); commandLine.handleCommandLine(); verify(mockAddFirstRole).handle(); diff --git a/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java b/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java index 2fca9cde35c..d9b4ddb49f6 100644 --- a/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java +++ b/clients/client-java/src/main/java/org/apache/gravitino/client/ErrorHandlers.java @@ -44,7 +44,6 @@ import org.apache.gravitino.exceptions.ModelAlreadyExistsException; import org.apache.gravitino.exceptions.ModelVersionAliasesAlreadyExistException; import org.apache.gravitino.exceptions.NoSuchCatalogException; -import org.apache.gravitino.exceptions.NoSuchCredentialException; import org.apache.gravitino.exceptions.NoSuchFilesetException; import org.apache.gravitino.exceptions.NoSuchGroupException; import org.apache.gravitino.exceptions.NoSuchMetadataObjectException; @@ -898,10 +897,6 @@ public void accept(ErrorResponse errorResponse) { case ErrorConstants.NOT_FOUND_CODE: if (errorResponse.getType().equals(NoSuchMetalakeException.class.getSimpleName())) { throw new NoSuchMetalakeException(errorMessage); - } else if (errorResponse - .getType() - .equals(NoSuchCredentialException.class.getSimpleName())) { - throw new NoSuchCredentialException(errorMessage); } else { throw new NotFoundException(errorMessage); } diff --git a/clients/client-java/src/main/java/org/apache/gravitino/client/GenericModelCatalog.java b/clients/client-java/src/main/java/org/apache/gravitino/client/GenericModelCatalog.java index 9c1c4654d38..50e9eb246ac 100644 --- a/clients/client-java/src/main/java/org/apache/gravitino/client/GenericModelCatalog.java +++ b/clients/client-java/src/main/java/org/apache/gravitino/client/GenericModelCatalog.java @@ -204,7 +204,7 @@ public void linkModelVersion( NameIdentifier modelFullIdent = modelFullNameIdentifier(ident); 
BaseResponse resp = restClient.post( - formatModelVersionRequestPath(modelFullIdent), + formatModelVersionRequestPath(modelFullIdent) + "/versions", req, BaseResponse.class, Collections.emptyMap(), diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/TestGenericModelCatalog.java b/clients/client-java/src/test/java/org/apache/gravitino/client/TestGenericModelCatalog.java index 10e3ed678d3..a3575988fc0 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/TestGenericModelCatalog.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/TestGenericModelCatalog.java @@ -380,7 +380,8 @@ public void testLinkModelVersion() throws JsonProcessingException { String modelVersionPath = withSlash( GenericModelCatalog.formatModelVersionRequestPath( - NameIdentifier.of(METALAKE_NAME, CATALOG_NAME, "schema1", "model1"))); + NameIdentifier.of(METALAKE_NAME, CATALOG_NAME, "schema1", "model1")) + + "/versions"); ModelVersionLinkRequest request = new ModelVersionLinkRequest( diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/TestSupportCredentials.java b/clients/client-java/src/test/java/org/apache/gravitino/client/TestSupportCredentials.java index 7b0817c8bb5..842af4a6403 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/TestSupportCredentials.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/TestSupportCredentials.java @@ -19,7 +19,6 @@ package org.apache.gravitino.client; import static org.apache.hc.core5.http.HttpStatus.SC_INTERNAL_SERVER_ERROR; -import static org.apache.hc.core5.http.HttpStatus.SC_NOT_FOUND; import static org.apache.hc.core5.http.HttpStatus.SC_OK; import com.fasterxml.jackson.core.JsonProcessingException; @@ -39,7 +38,6 @@ import org.apache.gravitino.dto.responses.CredentialResponse; import org.apache.gravitino.dto.responses.ErrorResponse; import org.apache.gravitino.dto.util.DTOConverters; -import 
org.apache.gravitino.exceptions.NoSuchCredentialException; import org.apache.gravitino.file.Fileset; import org.apache.hc.core5.http.Method; import org.junit.jupiter.api.Assertions; @@ -154,16 +152,6 @@ private void testGetCredentials( credentials = supportsCredentials.getCredentials(); Assertions.assertEquals(0, credentials.length); - // Test throw NoSuchCredentialException - ErrorResponse errorResp = - ErrorResponse.notFound(NoSuchCredentialException.class.getSimpleName(), "mock error"); - buildMockResource(Method.GET, path, null, errorResp, SC_NOT_FOUND); - - Throwable ex = - Assertions.assertThrows( - NoSuchCredentialException.class, () -> supportsCredentials.getCredentials()); - Assertions.assertTrue(ex.getMessage().contains("mock error")); - // Test throw internal error ErrorResponse errorResp1 = ErrorResponse.internalError("mock error"); buildMockResource(Method.GET, path, null, errorResp1, SC_INTERNAL_SERVER_ERROR); diff --git a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlIT.java b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlIT.java index 78c29433439..268ed20f3ce 100644 --- a/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlIT.java +++ b/clients/client-java/src/test/java/org/apache/gravitino/client/integration/test/authorization/AccessControlIT.java @@ -121,6 +121,10 @@ void testManageUsers() { users.stream().map(User::name).collect(Collectors.toList())); Assertions.assertEquals(Lists.newArrayList("role1"), users.get(2).roles()); + // ISSUE-6061: Test listUsers with revoked users + metalake.revokeRolesFromUser(Lists.newArrayList("role1"), username); + Assertions.assertEquals(3, metalake.listUsers().length); + // Get a not-existed user Assertions.assertThrows(NoSuchUserException.class, () -> metalake.getUser("not-existed")); @@ -176,6 +180,10 @@ void testManageGroups() { 
groups.stream().map(Group::name).collect(Collectors.toList())); Assertions.assertEquals(Lists.newArrayList("role2"), groups.get(0).roles()); + // ISSUE-6061: Test listGroups with revoked groups + metalake.revokeRolesFromGroup(Lists.newArrayList("role2"), groupName); + Assertions.assertEquals(2, metalake.listGroups().length); + Assertions.assertTrue(metalake.removeGroup(groupName)); Assertions.assertFalse(metalake.removeGroup(groupName)); diff --git a/clients/client-python/gravitino/client/generic_model_catalog.py b/clients/client-python/gravitino/client/generic_model_catalog.py index ca6b5cd31fb..89bf29be13a 100644 --- a/clients/client-python/gravitino/client/generic_model_catalog.py +++ b/clients/client-python/gravitino/client/generic_model_catalog.py @@ -303,7 +303,7 @@ def link_model_version( request.validate() resp = self.rest_client.post( - f"{self._format_model_version_request_path(model_full_ident)}", + f"{self._format_model_version_request_path(model_full_ident)}/versions", request, error_handler=MODEL_ERROR_HANDLER, ) diff --git a/clients/filesystem-fuse/.cargo/config.toml b/clients/filesystem-fuse/.cargo/config.toml index 37751e880c3..9d5bb048edc 100644 --- a/clients/filesystem-fuse/.cargo/config.toml +++ b/clients/filesystem-fuse/.cargo/config.toml @@ -16,5 +16,4 @@ # under the License. 
[build] -target-dir = "build" - +rustflags = ["-Adead_code", "-Aclippy::redundant-field-names"] diff --git a/clients/filesystem-fuse/Cargo.toml b/clients/filesystem-fuse/Cargo.toml index 1b186d61cb1..3760bd5285f 100644 --- a/clients/filesystem-fuse/Cargo.toml +++ b/clients/filesystem-fuse/Cargo.toml @@ -29,9 +29,25 @@ repository = "https://github.com/apache/gravitino" name = "gvfs-fuse" path = "src/main.rs" +[lib] +name = "gvfs_fuse" + [dependencies] +async-trait = "0.1" +bytes = "1.6.0" +config = "0.13" +dashmap = "6.1.0" +fuse3 = { version = "0.8.1", "features" = ["tokio-runtime", "unprivileged"] } futures-util = "0.3.30" -libc = "0.2.164" +libc = "0.2.168" log = "0.4.22" +once_cell = "1.20.2" +opendal = { version = "0.46.0", features = ["services-s3"] } +reqwest = { version = "0.12.9", features = ["json"] } +serde = { version = "1.0.216", features = ["derive"] } tokio = { version = "1.38.0", features = ["full"] } -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } \ No newline at end of file +tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +urlencoding = "2.1.3" + +[dev-dependencies] +mockito = "0.31" diff --git a/clients/filesystem-fuse/Makefile b/clients/filesystem-fuse/Makefile new file mode 100644 index 00000000000..f4a4cef20ae --- /dev/null +++ b/clients/filesystem-fuse/Makefile @@ -0,0 +1,69 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +.EXPORT_ALL_VARIABLES: + +.PHONY: build +build: + cargo build --all-features --workspace + +fmt: + cargo fmt --all + +cargo-sort: install-cargo-sort + cargo sort -w + +fix-toml: install-taplo-cli + taplo fmt + +check-fmt: + cargo fmt --all -- --check + +check-clippy: + cargo clippy --all-targets --all-features --workspace -- -D warnings + +install-cargo-sort: + cargo install cargo-sort@1.0.9 + +check-cargo-sort: install-cargo-sort + cargo sort -c + +install-cargo-machete: + cargo install cargo-machete + +cargo-machete: install-cargo-machete + cargo machete + +install-taplo-cli: + cargo install taplo-cli@0.9.0 + +check-toml: install-taplo-cli + taplo check + +check: check-fmt check-clippy check-cargo-sort check-toml cargo-machete + +doc-test: + cargo test --no-fail-fast --doc --all-features --workspace + +unit-test: doc-test + cargo test --no-fail-fast --lib --all-features --workspace + +test: doc-test + cargo test --no-fail-fast --all-targets --all-features --workspace + +clean: + cargo clean diff --git a/clients/filesystem-fuse/build.gradle.kts b/clients/filesystem-fuse/build.gradle.kts index 08693ddc5bd..7d24c86a5b0 100644 --- a/clients/filesystem-fuse/build.gradle.kts +++ b/clients/filesystem-fuse/build.gradle.kts @@ -20,8 +20,6 @@ import org.gradle.api.tasks.Exec val checkRustEnvironment by tasks.registering(Exec::class) { - description = "Check if Rust environment." 
- group = "verification" commandLine("bash", "-c", "cargo --version") standardOutput = System.out errorOutput = System.err @@ -30,36 +28,30 @@ val checkRustEnvironment by tasks.registering(Exec::class) { val buildRustProject by tasks.registering(Exec::class) { dependsOn(checkRustEnvironment) - description = "Compile the Rust project" workingDir = file("$projectDir") - commandLine("bash", "-c", "cargo build --release") + commandLine("bash", "-c", "make build") } val checkRustProject by tasks.registering(Exec::class) { dependsOn(checkRustEnvironment) - description = "Check the Rust project" workingDir = file("$projectDir") - commandLine( - "bash", - "-c", - """ - set -e - echo "Checking the code format" - cargo fmt --all -- --check - - echo "Running clippy" - cargo clippy --all-targets --all-features --workspace -- -D warnings - """.trimIndent() - ) + commandLine("bash", "-c", "make check") } val testRustProject by tasks.registering(Exec::class) { dependsOn(checkRustEnvironment) - description = "Run tests in the Rust project" - group = "verification" workingDir = file("$projectDir") - commandLine("bash", "-c", "cargo test --release") + commandLine("bash", "-c", "make test") + + standardOutput = System.out + errorOutput = System.err +} + +val cleanRustProject by tasks.registering(Exec::class) { + dependsOn(checkRustEnvironment) + workingDir = file("$projectDir") + commandLine("bash", "-c", "make clean") standardOutput = System.out errorOutput = System.err @@ -85,3 +77,7 @@ tasks.named("check") { tasks.named("test") { dependsOn(testRustProject) } + +tasks.named("clean") { + dependsOn(cleanRustProject) +} diff --git a/clients/filesystem-fuse/conf/gvfs_fuse.toml b/clients/filesystem-fuse/conf/gvfs_fuse.toml new file mode 100644 index 00000000000..4bde0e9e1bd --- /dev/null +++ b/clients/filesystem-fuse/conf/gvfs_fuse.toml @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# fuse settings +[fuse] +file_mask = 0o600 +dir_mask = 0o700 +fs_type = "memory" + +[fuse.properties] + +# filesystem settings +[filesystem] +block_size = 8192 + +# Gravitino settings +[gravitino] +uri = "http://localhost:8090" +metalake = "your_metalake" + +# extend settings +[extend_config] +s3-access_key_id = "your access_key" +s3-secret_access_key = "your_secret_key" diff --git a/clients/filesystem-fuse/rust-toolchain.toml b/clients/filesystem-fuse/rust-toolchain.toml new file mode 100644 index 00000000000..a7cf737871d --- /dev/null +++ b/clients/filesystem-fuse/rust-toolchain.toml @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +[toolchain] +channel = "1.82.0" +components = ["rustfmt", "clippy", "rust-src"] +profile = "default" diff --git a/clients/filesystem-fuse/src/config.rs b/clients/filesystem-fuse/src/config.rs new file mode 100644 index 00000000000..17908fd08fc --- /dev/null +++ b/clients/filesystem-fuse/src/config.rs @@ -0,0 +1,330 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +use crate::error::ErrorCode::{ConfigNotFound, InvalidConfig}; +use crate::utils::GvfsResult; +use config::{builder, Config}; +use log::{error, info, warn}; +use serde::Deserialize; +use std::collections::HashMap; +use std::fs; + +pub(crate) const CONF_FUSE_FILE_MASK: ConfigEntity = ConfigEntity::new( + FuseConfig::MODULE_NAME, + "file_mask", + "The default file mask for the FUSE filesystem", + 0o600, +); + +pub(crate) const CONF_FUSE_DIR_MASK: ConfigEntity = ConfigEntity::new( + FuseConfig::MODULE_NAME, + "dir_mask", + "The default directory mask for the FUSE filesystem", + 0o700, +); + +pub(crate) const CONF_FUSE_FS_TYPE: ConfigEntity<&'static str> = ConfigEntity::new( + FuseConfig::MODULE_NAME, + "fs_type", + "The type of the FUSE filesystem", + "memory", +); + +pub(crate) const CONF_FUSE_CONFIG_PATH: ConfigEntity<&'static str> = ConfigEntity::new( + FuseConfig::MODULE_NAME, + "config_path", + "The path of the FUSE configuration file", + "/etc/gvfs/gvfs.toml", +); + +pub(crate) const CONF_FILESYSTEM_BLOCK_SIZE: ConfigEntity = ConfigEntity::new( + FilesystemConfig::MODULE_NAME, + "block_size", + "The block size of the gvfs fuse filesystem", + 4096, +); + +pub(crate) const CONF_GRAVITINO_URI: ConfigEntity<&'static str> = ConfigEntity::new( + GravitinoConfig::MODULE_NAME, + "uri", + "The URI of the Gravitino server", + "http://localhost:8090", +); + +pub(crate) const CONF_GRAVITINO_METALAKE: ConfigEntity<&'static str> = ConfigEntity::new( + GravitinoConfig::MODULE_NAME, + "metalake", + "The metalake of the Gravitino server", + "", +); + +pub(crate) struct ConfigEntity { + module: &'static str, + name: &'static str, + description: &'static str, + pub(crate) default: T, +} + +impl ConfigEntity { + const fn new( + module: &'static str, + name: &'static str, + description: &'static str, + default: T, + ) -> Self { + ConfigEntity { + module: module, + name: name, + description: description, + default: default, + } + } +} + +enum ConfigValue { + I32(ConfigEntity), + 
U32(ConfigEntity), + String(ConfigEntity<&'static str>), + Bool(ConfigEntity), + Float(ConfigEntity), +} + +struct DefaultConfig { + configs: HashMap, +} + +impl Default for DefaultConfig { + fn default() -> Self { + let mut configs = HashMap::new(); + + configs.insert( + Self::compose_key(CONF_FUSE_FILE_MASK), + ConfigValue::U32(CONF_FUSE_FILE_MASK), + ); + configs.insert( + Self::compose_key(CONF_FUSE_DIR_MASK), + ConfigValue::U32(CONF_FUSE_DIR_MASK), + ); + configs.insert( + Self::compose_key(CONF_FUSE_FS_TYPE), + ConfigValue::String(CONF_FUSE_FS_TYPE), + ); + configs.insert( + Self::compose_key(CONF_FUSE_CONFIG_PATH), + ConfigValue::String(CONF_FUSE_CONFIG_PATH), + ); + configs.insert( + Self::compose_key(CONF_GRAVITINO_URI), + ConfigValue::String(CONF_GRAVITINO_URI), + ); + configs.insert( + Self::compose_key(CONF_GRAVITINO_METALAKE), + ConfigValue::String(CONF_GRAVITINO_METALAKE), + ); + configs.insert( + Self::compose_key(CONF_FILESYSTEM_BLOCK_SIZE), + ConfigValue::U32(CONF_FILESYSTEM_BLOCK_SIZE), + ); + + DefaultConfig { configs } + } +} + +impl DefaultConfig { + fn compose_key(entity: ConfigEntity) -> String { + format!("{}.{}", entity.module, entity.name) + } +} + +#[derive(Debug, Deserialize)] +pub struct AppConfig { + #[serde(default)] + pub fuse: FuseConfig, + #[serde(default)] + pub filesystem: FilesystemConfig, + #[serde(default)] + pub gravitino: GravitinoConfig, + #[serde(default)] + pub extend_config: HashMap, +} + +impl Default for AppConfig { + fn default() -> Self { + let builder = Self::crete_default_config_builder(); + let conf = builder + .build() + .expect("Failed to build default configuration"); + conf.try_deserialize::() + .expect("Failed to deserialize default AppConfig") + } +} + +type ConfigBuilder = builder::ConfigBuilder; + +impl AppConfig { + fn crete_default_config_builder() -> ConfigBuilder { + let default = DefaultConfig::default(); + + default + .configs + .values() + .fold( + Config::builder(), + |builder, config_entity| match 
config_entity { + ConfigValue::I32(entity) => Self::add_config(builder, entity), + ConfigValue::U32(entity) => Self::add_config(builder, entity), + ConfigValue::String(entity) => Self::add_config(builder, entity), + ConfigValue::Bool(entity) => Self::add_config(builder, entity), + ConfigValue::Float(entity) => Self::add_config(builder, entity), + }, + ) + } + + fn add_config>( + builder: ConfigBuilder, + entity: &ConfigEntity, + ) -> ConfigBuilder { + let name = format!("{}.{}", entity.module, entity.name); + builder + .set_default(&name, entity.default.clone().into()) + .unwrap_or_else(|e| panic!("Failed to set default for {}: {}", entity.name, e)) + } + + pub fn from_file(config_file_path: Option<&str>) -> GvfsResult { + let builder = Self::crete_default_config_builder(); + + let config_path = { + if config_file_path.is_some() { + let path = config_file_path.unwrap(); + //check config file exists + if fs::metadata(path).is_err() { + return Err( + ConfigNotFound.to_error("The configuration file not found".to_string()) + ); + } + info!("Use configuration file: {}", path); + path + } else { + //use default config + if fs::metadata(CONF_FUSE_CONFIG_PATH.default).is_err() { + warn!( + "The default configuration file is not found, using the default configuration" + ); + return Ok(AppConfig::default()); + } else { + warn!( + "Using the default config file {}", + CONF_FUSE_CONFIG_PATH.default + ); + } + CONF_FUSE_CONFIG_PATH.default + } + }; + let config = builder + .add_source(config::File::with_name(config_path).required(true)) + .build(); + if let Err(e) = config { + let msg = format!("Failed to build configuration: {}", e); + error!("{}", msg); + return Err(InvalidConfig.to_error(msg)); + } + + let conf = config.unwrap(); + let app_config = conf.try_deserialize::(); + + if let Err(e) = app_config { + let msg = format!("Failed to deserialize configuration: {}", e); + error!("{}", msg); + return Err(InvalidConfig.to_error(msg)); + } + Ok(app_config.unwrap()) + } +} + 
+#[derive(Debug, Deserialize, Default)] +pub struct FuseConfig { + #[serde(default)] + pub file_mask: u32, + #[serde(default)] + pub dir_mask: u32, + #[serde(default)] + pub fs_type: String, + #[serde(default)] + pub config_path: String, + #[serde(default)] + pub properties: HashMap, +} + +impl FuseConfig { + const MODULE_NAME: &'static str = "fuse"; +} + +#[derive(Debug, Deserialize, Default)] +pub struct FilesystemConfig { + #[serde(default)] + pub block_size: u32, +} + +impl FilesystemConfig { + const MODULE_NAME: &'static str = "filesystem"; +} + +#[derive(Debug, Deserialize, Default)] +pub struct GravitinoConfig { + #[serde(default)] + pub uri: String, + #[serde(default)] + pub metalake: String, +} + +impl GravitinoConfig { + const MODULE_NAME: &'static str = "gravitino"; +} + +#[cfg(test)] +mod test { + use crate::config::AppConfig; + + #[test] + fn test_config_from_file() { + let config = AppConfig::from_file(Some("tests/conf/config_test.toml")).unwrap(); + assert_eq!(config.fuse.file_mask, 0o644); + assert_eq!(config.fuse.dir_mask, 0o755); + assert_eq!(config.filesystem.block_size, 8192); + assert_eq!(config.gravitino.uri, "http://localhost:8090"); + assert_eq!(config.gravitino.metalake, "test"); + assert_eq!( + config.extend_config.get("s3-access_key_id"), + Some(&"XXX_access_key".to_string()) + ); + assert_eq!( + config.extend_config.get("s3-secret_access_key"), + Some(&"XXX_secret_key".to_string()) + ); + } + + #[test] + fn test_default_config() { + let config = AppConfig::default(); + assert_eq!(config.fuse.file_mask, 0o600); + assert_eq!(config.fuse.dir_mask, 0o700); + assert_eq!(config.filesystem.block_size, 4096); + assert_eq!(config.gravitino.uri, "http://localhost:8090"); + assert_eq!(config.gravitino.metalake, ""); + } +} diff --git a/clients/filesystem-fuse/src/default_raw_filesystem.rs b/clients/filesystem-fuse/src/default_raw_filesystem.rs new file mode 100644 index 00000000000..944181246d5 --- /dev/null +++ 
b/clients/filesystem-fuse/src/default_raw_filesystem.rs @@ -0,0 +1,461 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +use crate::config::AppConfig; +use crate::filesystem::{ + FileStat, FileSystemContext, PathFileSystem, RawFileSystem, Result, FS_META_FILE_ID, + FS_META_FILE_NAME, FS_META_FILE_PATH, INITIAL_FILE_ID, ROOT_DIR_FILE_ID, + ROOT_DIR_PARENT_FILE_ID, ROOT_DIR_PATH, +}; +use crate::opened_file::{FileHandle, OpenFileFlags, OpenedFile}; +use crate::opened_file_manager::OpenedFileManager; +use async_trait::async_trait; +use bytes::Bytes; +use fuse3::{Errno, FileType}; +use std::collections::HashMap; +use std::ffi::OsStr; +use std::path::{Path, PathBuf}; +use std::sync::atomic::AtomicU64; +use tokio::sync::RwLock; + +/// DefaultRawFileSystem is a simple implementation for the file system. +/// it is used to manage the file metadata and file handle. +/// The operations of the file system are implemented by the PathFileSystem. 
+pub struct DefaultRawFileSystem { + /// file entries + file_entry_manager: RwLock, + /// opened files + opened_file_manager: OpenedFileManager, + /// file id generator + file_id_generator: AtomicU64, + + /// real filesystem + fs: T, +} + +impl DefaultRawFileSystem { + pub(crate) fn new(fs: T, _config: &AppConfig, _fs_context: &FileSystemContext) -> Self { + Self { + file_entry_manager: RwLock::new(FileEntryManager::new()), + opened_file_manager: OpenedFileManager::new(), + file_id_generator: AtomicU64::new(INITIAL_FILE_ID), + fs, + } + } + + fn next_file_id(&self) -> u64 { + self.file_id_generator + .fetch_add(1, std::sync::atomic::Ordering::SeqCst) + } + + async fn get_file_entry(&self, file_id: u64) -> Result { + self.file_entry_manager + .read() + .await + .get_file_entry_by_id(file_id) + .ok_or(Errno::from(libc::ENOENT)) + } + + async fn get_file_entry_by_path(&self, path: &Path) -> Option { + self.file_entry_manager + .read() + .await + .get_file_entry_by_path(path) + } + + async fn resolve_file_id_to_filestat(&self, file_stat: &mut FileStat, parent_file_id: u64) { + debug_assert!(parent_file_id != 0); + let mut file_manager = self.file_entry_manager.write().await; + let file_entry = file_manager.get_file_entry_by_path(&file_stat.path); + match file_entry { + None => { + // allocate new file id + file_stat.set_file_id(parent_file_id, self.next_file_id()); + file_manager.insert(file_stat.parent_file_id, file_stat.file_id, &file_stat.path); + } + Some(file) => { + // use the exist file id + file_stat.set_file_id(file.parent_file_id, file.file_id); + } + } + } + + async fn open_file_internal( + &self, + file_id: u64, + flags: u32, + kind: FileType, + ) -> Result { + let file_entry = self.get_file_entry(file_id).await?; + + let mut opened_file = { + match kind { + FileType::Directory => { + self.fs + .open_dir(&file_entry.path, OpenFileFlags(flags)) + .await? + } + FileType::RegularFile => { + self.fs + .open_file(&file_entry.path, OpenFileFlags(flags)) + .await? 
+ } + _ => return Err(Errno::from(libc::EINVAL)), + } + }; + // set the exists file id + opened_file.set_file_id(file_entry.parent_file_id, file_id); + let file = self.opened_file_manager.put(opened_file); + let file = file.lock().await; + Ok(file.file_handle()) + } + + async fn remove_file_entry_locked(&self, path: &Path) { + let mut file_manager = self.file_entry_manager.write().await; + file_manager.remove(path); + } + + async fn insert_file_entry_locked(&self, parent_file_id: u64, file_id: u64, path: &Path) { + let mut file_manager = self.file_entry_manager.write().await; + file_manager.insert(parent_file_id, file_id, path); + } + + fn get_meta_file_stat(&self) -> FileStat { + let mut meta_file_stat = + FileStat::new_file_filestat_with_path(Path::new(FS_META_FILE_PATH), 0); + meta_file_stat.set_file_id(ROOT_DIR_FILE_ID, FS_META_FILE_ID); + meta_file_stat + } + + fn is_meta_file(&self, file_id: u64) -> bool { + file_id == FS_META_FILE_ID + } + + fn is_meta_file_name(&self, parent_file_id: u64, name: &OsStr) -> bool { + parent_file_id == ROOT_DIR_FILE_ID && name == OsStr::new(FS_META_FILE_NAME) + } +} + +#[async_trait] +impl RawFileSystem for DefaultRawFileSystem { + async fn init(&self) -> Result<()> { + // init root directory + self.insert_file_entry_locked( + ROOT_DIR_PARENT_FILE_ID, + ROOT_DIR_FILE_ID, + Path::new(ROOT_DIR_PATH), + ) + .await; + + self.insert_file_entry_locked( + ROOT_DIR_FILE_ID, + FS_META_FILE_ID, + Path::new(FS_META_FILE_PATH), + ) + .await; + self.fs.init().await + } + + async fn get_file_path(&self, file_id: u64) -> Result { + let file_entry = self.get_file_entry(file_id).await?; + Ok(file_entry.path.to_string_lossy().to_string()) + } + + async fn valid_file_handle_id(&self, file_id: u64, fh: u64) -> Result<()> { + let fh_file_id = self + .opened_file_manager + .get(fh) + .ok_or(Errno::from(libc::EBADF))? 
+ .lock() + .await + .file_stat + .file_id; + + (file_id == fh_file_id) + .then_some(()) + .ok_or(Errno::from(libc::EBADF)) + } + + async fn stat(&self, file_id: u64) -> Result { + if self.is_meta_file(file_id) { + return Ok(self.get_meta_file_stat()); + } + + let file_entry = self.get_file_entry(file_id).await?; + let mut file_stat = self.fs.stat(&file_entry.path).await?; + file_stat.set_file_id(file_entry.parent_file_id, file_entry.file_id); + Ok(file_stat) + } + + async fn lookup(&self, parent_file_id: u64, name: &OsStr) -> Result { + if self.is_meta_file_name(parent_file_id, name) { + return Ok(self.get_meta_file_stat()); + } + + let parent_file_entry = self.get_file_entry(parent_file_id).await?; + let path = parent_file_entry.path.join(name); + let mut file_stat = self.fs.stat(&path).await?; + // fill the file id to file stat + self.resolve_file_id_to_filestat(&mut file_stat, parent_file_id) + .await; + + Ok(file_stat) + } + + async fn read_dir(&self, file_id: u64) -> Result> { + let file_entry = self.get_file_entry(file_id).await?; + let mut child_filestats = self.fs.read_dir(&file_entry.path).await?; + for file_stat in child_filestats.iter_mut() { + self.resolve_file_id_to_filestat(file_stat, file_id).await; + } + + if file_id == ROOT_DIR_FILE_ID { + child_filestats.push(self.get_meta_file_stat()); + } + Ok(child_filestats) + } + + async fn open_file(&self, file_id: u64, flags: u32) -> Result { + if self.is_meta_file(file_id) { + let meta_file = OpenedFile::new(self.get_meta_file_stat()); + let resutl = self.opened_file_manager.put(meta_file); + let file = resutl.lock().await; + return Ok(file.file_handle()); + } + + self.open_file_internal(file_id, flags, FileType::RegularFile) + .await + } + + async fn open_dir(&self, file_id: u64, flags: u32) -> Result { + self.open_file_internal(file_id, flags, FileType::Directory) + .await + } + + async fn create_file( + &self, + parent_file_id: u64, + name: &OsStr, + flags: u32, + ) -> Result { + if 
self.is_meta_file_name(parent_file_id, name) { + return Err(Errno::from(libc::EEXIST)); + } + + let parent_file_entry = self.get_file_entry(parent_file_id).await?; + let mut file_without_id = self + .fs + .create_file(&parent_file_entry.path.join(name), OpenFileFlags(flags)) + .await?; + + file_without_id.set_file_id(parent_file_id, self.next_file_id()); + + // insert the new file to file entry manager + self.insert_file_entry_locked( + parent_file_id, + file_without_id.file_stat.file_id, + &file_without_id.file_stat.path, + ) + .await; + + // put the openfile to the opened file manager and allocate a file handle id + let file_with_id = self.opened_file_manager.put(file_without_id); + let opened_file_with_file_handle_id = file_with_id.lock().await; + Ok(opened_file_with_file_handle_id.file_handle()) + } + + async fn create_dir(&self, parent_file_id: u64, name: &OsStr) -> Result { + let parent_file_entry = self.get_file_entry(parent_file_id).await?; + let path = parent_file_entry.path.join(name); + let mut filestat = self.fs.create_dir(&path).await?; + + filestat.set_file_id(parent_file_id, self.next_file_id()); + + // insert the new file to file entry manager + self.insert_file_entry_locked(parent_file_id, filestat.file_id, &filestat.path) + .await; + Ok(filestat.file_id) + } + + async fn set_attr(&self, file_id: u64, file_stat: &FileStat) -> Result<()> { + if self.is_meta_file(file_id) { + return Ok(()); + } + + let file_entry = self.get_file_entry(file_id).await?; + self.fs.set_attr(&file_entry.path, file_stat, true).await + } + + async fn remove_file(&self, parent_file_id: u64, name: &OsStr) -> Result<()> { + if self.is_meta_file_name(parent_file_id, name) { + return Err(Errno::from(libc::EPERM)); + } + + let parent_file_entry = self.get_file_entry(parent_file_id).await?; + let path = parent_file_entry.path.join(name); + self.fs.remove_file(&path).await?; + + // remove the file from file entry manager + self.remove_file_entry_locked(&path).await; + Ok(()) + } + 
+ async fn remove_dir(&self, parent_file_id: u64, name: &OsStr) -> Result<()> { + let parent_file_entry = self.get_file_entry(parent_file_id).await?; + let path = parent_file_entry.path.join(name); + self.fs.remove_dir(&path).await?; + + // remove the dir from file entry manager + self.remove_file_entry_locked(&path).await; + Ok(()) + } + + async fn flush_file(&self, _file_id: u64, fh: u64) -> Result<()> { + let opened_file = self + .opened_file_manager + .get(fh) + .ok_or(Errno::from(libc::EBADF))?; + let mut file = opened_file.lock().await; + file.flush().await + } + + async fn close_file(&self, _file_id: u64, fh: u64) -> Result<()> { + let opened_file = self + .opened_file_manager + .remove(fh) + .ok_or(Errno::from(libc::EBADF))?; + let mut file = opened_file.lock().await; + file.close().await + } + + async fn read(&self, file_id: u64, fh: u64, offset: u64, size: u32) -> Result { + if self.is_meta_file(file_id) { + return Ok(Bytes::new()); + } + + let (data, file_stat) = { + let opened_file = self + .opened_file_manager + .get(fh) + .ok_or(Errno::from(libc::EBADF))?; + let mut opened_file = opened_file.lock().await; + let data = opened_file.read(offset, size).await; + (data, opened_file.file_stat.clone()) + }; + + // update the file atime + self.fs.set_attr(&file_stat.path, &file_stat, false).await?; + + data + } + + async fn write(&self, file_id: u64, fh: u64, offset: u64, data: &[u8]) -> Result { + if self.is_meta_file(file_id) { + return Err(Errno::from(libc::EPERM)); + } + + let (len, file_stat) = { + let opened_file = self + .opened_file_manager + .get(fh) + .ok_or(Errno::from(libc::EBADF))?; + let mut opened_file = opened_file.lock().await; + let len = opened_file.write(offset, data).await; + (len, opened_file.file_stat.clone()) + }; + + // update the file size, mtime and atime + self.fs.set_attr(&file_stat.path, &file_stat, false).await?; + + len + } +} + +/// File entry is represent the abstract file. 
+#[derive(Debug, Clone)] +struct FileEntry { + file_id: u64, + parent_file_id: u64, + path: PathBuf, +} + +/// FileEntryManager is manage all the file entries in memory. it is used manger the file relationship and name mapping. +struct FileEntryManager { + // file_id_map is a map of file_id to file entry. + file_id_map: HashMap, + + // file_path_map is a map of file path to file entry. + file_path_map: HashMap, +} + +impl FileEntryManager { + fn new() -> Self { + Self { + file_id_map: HashMap::new(), + file_path_map: HashMap::new(), + } + } + + fn get_file_entry_by_id(&self, file_id: u64) -> Option { + self.file_id_map.get(&file_id).cloned() + } + + fn get_file_entry_by_path(&self, path: &Path) -> Option { + self.file_path_map.get(path).cloned() + } + + fn insert(&mut self, parent_file_id: u64, file_id: u64, path: &Path) { + let file_entry = FileEntry { + file_id, + parent_file_id, + path: path.into(), + }; + self.file_id_map.insert(file_id, file_entry.clone()); + self.file_path_map.insert(path.into(), file_entry); + } + + fn remove(&mut self, path: &Path) { + if let Some(file) = self.file_path_map.remove(path) { + self.file_id_map.remove(&file.file_id); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_file_entry_manager() { + let mut manager = FileEntryManager::new(); + manager.insert(1, 2, Path::new("a/b")); + let file = manager.get_file_entry_by_id(2).unwrap(); + assert_eq!(file.file_id, 2); + assert_eq!(file.parent_file_id, 1); + assert_eq!(file.path, Path::new("a/b")); + + let file = manager.get_file_entry_by_path(Path::new("a/b")).unwrap(); + assert_eq!(file.file_id, 2); + assert_eq!(file.parent_file_id, 1); + assert_eq!(file.path, Path::new("a/b")); + + manager.remove(Path::new("a/b")); + assert!(manager.get_file_entry_by_id(2).is_none()); + assert!(manager.get_file_entry_by_path(Path::new("a/b")).is_none()); + } +} diff --git a/clients/filesystem-fuse/src/error.rs b/clients/filesystem-fuse/src/error.rs new file mode 100644 index 
// --- clients/filesystem-fuse/src/error.rs ---
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
use fuse3::Errno;

/// Coarse error categories used across the gvfs-fuse client.
#[derive(Debug, Copy, Clone)]
pub enum ErrorCode {
    UnSupportedFilesystem,
    GravitinoClientError,
    InvalidConfig,
    ConfigNotFound,
    OpenDalError,
}

impl ErrorCode {
    /// Attach a human-readable message to this code, producing a `GvfsError`.
    // NOTE(review): `impl Into<String>` restored — the generic argument was
    // stripped by the diff extraction.
    pub fn to_error(self, message: impl Into<String>) -> GvfsError {
        GvfsError::Error(self, message.into())
    }
}

impl std::fmt::Display for ErrorCode {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            ErrorCode::UnSupportedFilesystem => write!(f, "Unsupported filesystem"),
            ErrorCode::GravitinoClientError => write!(f, "Gravitino client error"),
            ErrorCode::InvalidConfig => write!(f, "Invalid config"),
            ErrorCode::ConfigNotFound => write!(f, "Config not found"),
            ErrorCode::OpenDalError => write!(f, "OpenDal error"),
        }
    }
}

/// Unified error type for the gvfs-fuse client. Wraps HTTP, fuse, and IO
/// failures plus categorized internal errors.
#[derive(Debug)]
pub enum GvfsError {
    RestError(String, reqwest::Error),
    Error(ErrorCode, String),
    Errno(Errno),
    IOError(std::io::Error),
}

impl From<reqwest::Error> for GvfsError {
    fn from(err: reqwest::Error) -> Self {
        GvfsError::RestError("Http request failed:".to_owned() + &err.to_string(), err)
    }
}

impl From<Errno> for GvfsError {
    fn from(errno: Errno) -> Self {
        GvfsError::Errno(errno)
    }
}

impl From<std::io::Error> for GvfsError {
    fn from(err: std::io::Error) -> Self {
        GvfsError::IOError(err)
    }
}

// --- clients/filesystem-fuse/src/filesystem.rs ---
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
+ */ +use crate::config::{ + AppConfig, CONF_FILESYSTEM_BLOCK_SIZE, CONF_FUSE_DIR_MASK, CONF_FUSE_FILE_MASK, +}; +use crate::opened_file::{FileHandle, OpenFileFlags, OpenedFile}; +use async_trait::async_trait; +use bytes::Bytes; +use fuse3::FileType::{Directory, RegularFile}; +use fuse3::{Errno, FileType, Timestamp}; +use std::ffi::{OsStr, OsString}; +use std::path::{Path, PathBuf}; +use std::time::SystemTime; + +pub(crate) type Result = std::result::Result; + +pub(crate) const ROOT_DIR_PARENT_FILE_ID: u64 = 1; +pub(crate) const ROOT_DIR_FILE_ID: u64 = 1; +pub(crate) const ROOT_DIR_NAME: &str = ""; +pub(crate) const ROOT_DIR_PATH: &str = "/"; +pub(crate) const INITIAL_FILE_ID: u64 = 10000; + +// File system meta file is indicated the fuse filesystem is active. +pub(crate) const FS_META_FILE_PATH: &str = "/.gvfs_meta"; +pub(crate) const FS_META_FILE_NAME: &str = ".gvfs_meta"; +pub(crate) const FS_META_FILE_ID: u64 = 10; + +/// RawFileSystem interface for the file system implementation. it use by FuseApiHandle, +/// it ues the file id to operate the file system apis +/// the `file_id` and `parent_file_id` it is the unique identifier for the file system, +/// it is used to identify the file or directory +/// the `handle_id` it is the file handle, it is used to identify the opened file, +/// it is used to read or write the file content +/// the `file id` and `handle_id` need to mapping the `ino`/`inode` and `fh` in the fuse3 +#[async_trait] +pub(crate) trait RawFileSystem: Send + Sync { + /// Init the file system + async fn init(&self) -> Result<()>; + + /// Get the file path by file id, if the file id is valid, return the file path + async fn get_file_path(&self, file_id: u64) -> Result; + + /// Validate the file id and file handle, if file id and file handle is valid and it associated, return Ok + async fn valid_file_handle_id(&self, file_id: u64, fh: u64) -> Result<()>; + + /// Get the file stat by file id. 
if the file id is valid, return the file stat + async fn stat(&self, file_id: u64) -> Result; + + /// Lookup the file by parent file id and file name, if the file exists, return the file stat + async fn lookup(&self, parent_file_id: u64, name: &OsStr) -> Result; + + /// Read the directory by file id, if the file id is a valid directory, return the file stat list + async fn read_dir(&self, dir_file_id: u64) -> Result>; + + /// Open the file by file id and flags, if the file id is a valid file, return the file handle + async fn open_file(&self, file_id: u64, flags: u32) -> Result; + + /// Open the directory by file id and flags, if successful, return the file handle + async fn open_dir(&self, file_id: u64, flags: u32) -> Result; + + /// Create the file by parent file id and file name and flags, if successful, return the file handle + async fn create_file( + &self, + parent_file_id: u64, + name: &OsStr, + flags: u32, + ) -> Result; + + /// Create the directory by parent file id and file name, if successful, return the file id + async fn create_dir(&self, parent_file_id: u64, name: &OsStr) -> Result; + + /// Set the file attribute by file id and file stat + async fn set_attr(&self, file_id: u64, file_stat: &FileStat) -> Result<()>; + + /// Remove the file by parent file id and file name + async fn remove_file(&self, parent_file_id: u64, name: &OsStr) -> Result<()>; + + /// Remove the directory by parent file id and file name + async fn remove_dir(&self, parent_file_id: u64, name: &OsStr) -> Result<()>; + + /// flush the file with file id and file handle, if successful return Ok + async fn flush_file(&self, file_id: u64, fh: u64) -> Result<()>; + + /// Close the file by file id and file handle, if successful + async fn close_file(&self, file_id: u64, fh: u64) -> Result<()>; + + /// Read the file content by file id, file handle, offset and size, if successful, return the read result + async fn read(&self, file_id: u64, fh: u64, offset: u64, size: u32) -> Result; + + /// 
Write the file content by file id, file handle, offset and data, if successful, return the written size + async fn write(&self, file_id: u64, fh: u64, offset: u64, data: &[u8]) -> Result; +} + +/// PathFileSystem is the interface for the file system implementation, it use to interact with other file system +/// it is used file path to operate the file system +#[async_trait] +pub(crate) trait PathFileSystem: Send + Sync { + /// Init the file system + async fn init(&self) -> Result<()>; + + /// Get the file stat by file path, if the file exists, return the file stat + async fn stat(&self, path: &Path) -> Result; + + /// Read the directory by file path, if the directory exists, return the file stat list + async fn read_dir(&self, path: &Path) -> Result>; + + /// Open the file by file path and flags, if the file exists, return the opened file + async fn open_file(&self, path: &Path, flags: OpenFileFlags) -> Result; + + /// Open the directory by file path and flags, if the file exists, return the opened file + async fn open_dir(&self, path: &Path, flags: OpenFileFlags) -> Result; + + /// Create the file by file path and flags, if successful, return the opened file + async fn create_file(&self, path: &Path, flags: OpenFileFlags) -> Result; + + /// Create the directory by file path , if successful, return the file stat + async fn create_dir(&self, path: &Path) -> Result; + + /// Set the file attribute by file path and file stat + async fn set_attr(&self, path: &Path, file_stat: &FileStat, flush: bool) -> Result<()>; + + /// Remove the file by file path + async fn remove_file(&self, path: &Path) -> Result<()>; + + /// Remove the directory by file path + async fn remove_dir(&self, path: &Path) -> Result<()>; + + fn get_capacity(&self) -> Result; +} + +// FileSystemContext is the system environment for the fuse file system. 
+pub(crate) struct FileSystemContext { + // system user id + pub(crate) uid: u32, + + // system group id + pub(crate) gid: u32, + + // default file permission + pub(crate) default_file_perm: u16, + + // default idr permission + pub(crate) default_dir_perm: u16, + + // io block size + pub(crate) block_size: u32, +} + +impl FileSystemContext { + pub(crate) fn new(uid: u32, gid: u32, config: &AppConfig) -> Self { + FileSystemContext { + uid, + gid, + default_file_perm: config.fuse.file_mask as u16, + default_dir_perm: config.fuse.dir_mask as u16, + block_size: config.filesystem.block_size, + } + } + + pub(crate) fn default() -> Self { + FileSystemContext { + uid: 0, + gid: 0, + default_file_perm: CONF_FUSE_FILE_MASK.default as u16, + default_dir_perm: CONF_FUSE_DIR_MASK.default as u16, + block_size: CONF_FILESYSTEM_BLOCK_SIZE.default, + } + } +} + +// capacity of the file system +pub struct FileSystemCapacity {} + +// FileStat is the file metadata of the file +#[derive(Clone, Debug)] +pub struct FileStat { + // file id for the file system. 
+ pub(crate) file_id: u64, + + // parent file id + pub(crate) parent_file_id: u64, + + // file name + pub(crate) name: OsString, + + // file path of the fuse file system root + pub(crate) path: PathBuf, + + // file size + pub(crate) size: u64, + + // file type like regular file or directory and so on + pub(crate) kind: FileType, + + // file access time + pub(crate) atime: Timestamp, + + // file modify time + pub(crate) mtime: Timestamp, + + // file create time + pub(crate) ctime: Timestamp, + + // file link count + pub(crate) nlink: u32, +} + +impl FileStat { + pub fn new_file_filestat_with_path(path: &Path, size: u64) -> Self { + Self::new_filestat(path, size, RegularFile) + } + + pub fn new_dir_filestat_with_path(path: &Path) -> Self { + Self::new_filestat(path, 0, Directory) + } + + pub fn new_file_filestat(parent: &Path, name: &OsStr, size: u64) -> Self { + let path = parent.join(name); + Self::new_filestat(&path, size, RegularFile) + } + + pub fn new_dir_filestat(parent: &Path, name: &OsStr) -> Self { + let path = parent.join(name); + Self::new_filestat(&path, 0, Directory) + } + + pub fn new_filestat(path: &Path, size: u64, kind: FileType) -> Self { + let atime = Timestamp::from(SystemTime::now()); + // root directory name is "" + let name = path.file_name().unwrap_or(OsStr::new(ROOT_DIR_NAME)); + Self { + file_id: 0, + parent_file_id: 0, + name: name.to_os_string(), + path: path.into(), + size: size, + kind: kind, + atime: atime, + mtime: atime, + ctime: atime, + nlink: 1, + } + } + + pub(crate) fn set_file_id(&mut self, parent_file_id: u64, file_id: u64) { + debug_assert!(file_id != 0 && parent_file_id != 0); + self.parent_file_id = parent_file_id; + self.file_id = file_id; + } +} + +/// File reader interface for read file content +#[async_trait] +pub(crate) trait FileReader: Sync + Send { + /// read the file content by offset and size, if successful, return the read result + async fn read(&mut self, offset: u64, size: u32) -> Result; + + /// close the file 
+ async fn close(&mut self) -> Result<()> { + Ok(()) + } +} + +/// File writer interface for write file content +#[async_trait] +pub trait FileWriter: Sync + Send { + /// write the file content by offset and data, if successful, return the written size + async fn write(&mut self, offset: u64, data: &[u8]) -> Result; + + /// close the file + async fn close(&mut self) -> Result<()> { + Ok(()) + } + + /// flush the file + async fn flush(&mut self) -> Result<()> { + Ok(()) + } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use libc::{O_APPEND, O_CREAT, O_RDONLY}; + use std::collections::HashMap; + use std::path::Component; + + pub(crate) struct TestPathFileSystem { + files: HashMap, + fs: F, + cwd: PathBuf, + } + + impl TestPathFileSystem { + pub(crate) fn new(cwd: &Path, fs: F) -> Self { + Self { + files: HashMap::new(), + fs, + cwd: cwd.into(), + } + } + + pub(crate) async fn test_path_file_system(&mut self) { + // test root dir + let resutl = self.fs.stat(Path::new("/")).await; + assert!(resutl.is_ok()); + let root_file_stat = resutl.unwrap(); + self.assert_file_stat(&root_file_stat, Path::new("/"), Directory, 0); + + // test list root dir + let result = self.fs.read_dir(Path::new("/")).await; + assert!(result.is_ok()); + + // Test create file + self.test_create_file(&self.cwd.join("file1.txt")).await; + + // Test create dir + self.test_create_dir(&self.cwd.join("dir1")).await; + + // Test list dir + self.test_list_dir(&self.cwd).await; + + // Test remove file + self.test_remove_file(&self.cwd.join("file1.txt")).await; + + // Test remove dir + self.test_remove_dir(&self.cwd.join("dir1")).await; + + // Test file not found + self.test_file_not_found(&self.cwd.join("unknown")).await; + } + + async fn test_stat_file(&mut self, path: &Path, expect_kind: FileType, expect_size: u64) { + let file_stat = self.fs.stat(path).await; + assert!(file_stat.is_ok()); + let file_stat = file_stat.unwrap(); + self.assert_file_stat(&file_stat, path, expect_kind, 
expect_size); + self.files.insert(file_stat.path.clone(), file_stat); + } + + async fn test_create_file(&mut self, path: &Path) { + let opened_file = self.fs.create_file(path, OpenFileFlags(0)).await; + assert!(opened_file.is_ok()); + let file = opened_file.unwrap(); + self.assert_file_stat(&file.file_stat, path, RegularFile, 0); + self.test_stat_file(path, RegularFile, 0).await; + } + + async fn test_create_dir(&mut self, path: &Path) { + let dir_stat = self.fs.create_dir(path).await; + assert!(dir_stat.is_ok()); + let dir_stat = dir_stat.unwrap(); + self.assert_file_stat(&dir_stat, path, Directory, 0); + self.test_stat_file(path, Directory, 0).await; + } + + async fn test_list_dir(&self, path: &Path) { + let list_dir = self.fs.read_dir(path).await; + assert!(list_dir.is_ok()); + let list_dir = list_dir.unwrap(); + for file_stat in list_dir { + assert!(self.files.contains_key(&file_stat.path)); + let actual_file_stat = self.files.get(&file_stat.path).unwrap(); + self.assert_file_stat( + &file_stat, + &actual_file_stat.path, + actual_file_stat.kind, + actual_file_stat.size, + ); + } + } + + async fn test_remove_file(&mut self, path: &Path) { + let remove_file = self.fs.remove_file(path).await; + assert!(remove_file.is_ok()); + self.files.remove(path); + + self.test_file_not_found(path).await; + } + + async fn test_remove_dir(&mut self, path: &Path) { + let remove_dir = self.fs.remove_dir(path).await; + assert!(remove_dir.is_ok()); + self.files.remove(path); + + self.test_file_not_found(path).await; + } + + async fn test_file_not_found(&self, path: &Path) { + let not_found_file = self.fs.stat(path).await; + assert!(not_found_file.is_err()); + } + + fn assert_file_stat(&self, file_stat: &FileStat, path: &Path, kind: FileType, size: u64) { + assert_eq!(file_stat.path, path); + assert_eq!(file_stat.kind, kind); + assert_eq!(file_stat.size, size); + } + } + + pub(crate) struct TestRawFileSystem { + fs: F, + files: HashMap, + cwd: PathBuf, + } + + impl TestRawFileSystem 
{ + pub(crate) fn new(cwd: &Path, fs: F) -> Self { + Self { + fs, + files: HashMap::new(), + cwd: cwd.into(), + } + } + + pub(crate) async fn test_raw_file_system(&mut self) { + // Test root dir + self.test_root_dir().await; + + // test read root dir + self.test_list_dir(ROOT_DIR_FILE_ID, false).await; + + // Test lookup meta file + let file_id = self + .test_lookup_file(ROOT_DIR_FILE_ID, ".gvfs_meta".as_ref(), RegularFile, 0) + .await; + + // Test get meta file stat + self.test_stat_file(file_id, Path::new("/.gvfs_meta"), RegularFile, 0) + .await; + + // Test get file path + self.test_get_file_path(file_id, "/.gvfs_meta").await; + + // get cwd file id + let mut parent_file_id = ROOT_DIR_FILE_ID; + for child in self.cwd.components() { + if child == Component::RootDir { + continue; + } + let file_id = self.fs.create_dir(parent_file_id, child.as_os_str()).await; + assert!(file_id.is_ok()); + parent_file_id = file_id.unwrap(); + } + + // Test create file + let file_handle = self + .test_create_file(parent_file_id, "file1.txt".as_ref()) + .await; + + // Test write file + self.test_write_file(&file_handle, "test").await; + + // Test close file + self.test_close_file(&file_handle).await; + + // Test open file with read + let file_handle = self + .test_open_file(parent_file_id, "file1.txt".as_ref(), O_RDONLY as u32) + .await; + + // Test read file + self.test_read_file(&file_handle, "test").await; + + // Test close file + self.test_close_file(&file_handle).await; + + // Test create dir + self.test_create_dir(parent_file_id, "dir1".as_ref()).await; + + // Test list dir + self.test_list_dir(parent_file_id, true).await; + + // Test remove file + self.test_remove_file(parent_file_id, "file1.txt".as_ref()) + .await; + + // Test remove dir + self.test_remove_dir(parent_file_id, "dir1".as_ref()).await; + + // Test list dir again + self.test_list_dir(parent_file_id, true).await; + + // Test file not found + self.test_file_not_found(23).await; + } + + async fn test_root_dir(&self) 
{ + let root_file_stat = self.fs.stat(ROOT_DIR_FILE_ID).await; + assert!(root_file_stat.is_ok()); + let root_file_stat = root_file_stat.unwrap(); + self.assert_file_stat(&root_file_stat, Path::new(ROOT_DIR_PATH), Directory, 0); + } + + async fn test_lookup_file( + &mut self, + parent_file_id: u64, + expect_name: &OsStr, + expect_kind: FileType, + expect_size: u64, + ) -> u64 { + let file_stat = self.fs.lookup(parent_file_id, expect_name).await; + assert!(file_stat.is_ok()); + let file_stat = file_stat.unwrap(); + self.assert_file_stat(&file_stat, &file_stat.path, expect_kind, expect_size); + assert_eq!(file_stat.name, expect_name); + let file_id = file_stat.file_id; + self.files.insert(file_stat.file_id, file_stat); + file_id + } + + async fn test_get_file_path(&mut self, file_id: u64, expect_path: &str) { + let file_path = self.fs.get_file_path(file_id).await; + assert!(file_path.is_ok()); + assert_eq!(file_path.unwrap(), expect_path); + } + + async fn test_stat_file( + &mut self, + file_id: u64, + expect_path: &Path, + expect_kind: FileType, + expect_size: u64, + ) { + let file_stat = self.fs.stat(file_id).await; + assert!(file_stat.is_ok()); + let file_stat = file_stat.unwrap(); + self.assert_file_stat(&file_stat, expect_path, expect_kind, expect_size); + self.files.insert(file_stat.file_id, file_stat); + } + + async fn test_create_file(&mut self, root_file_id: u64, name: &OsStr) -> FileHandle { + let file = self + .fs + .create_file(root_file_id, name, (O_CREAT | O_APPEND) as u32) + .await; + assert!(file.is_ok()); + let file = file.unwrap(); + assert!(file.handle_id > 0); + assert!(file.file_id >= INITIAL_FILE_ID); + let file_stat = self.fs.stat(file.file_id).await; + assert!(file_stat.is_ok()); + + self.test_stat_file(file.file_id, &file_stat.unwrap().path, RegularFile, 0) + .await; + file + } + + async fn test_open_file(&self, root_file_id: u64, name: &OsStr, flags: u32) -> FileHandle { + let file = self.fs.lookup(root_file_id, name).await.unwrap(); + let 
file_handle = self.fs.open_file(file.file_id, flags).await; + assert!(file_handle.is_ok()); + let file_handle = file_handle.unwrap(); + assert_eq!(file_handle.file_id, file.file_id); + file_handle + } + + async fn test_write_file(&mut self, file_handle: &FileHandle, content: &str) { + let write_size = self + .fs + .write( + file_handle.file_id, + file_handle.handle_id, + 0, + content.as_bytes(), + ) + .await; + + assert!(write_size.is_ok()); + assert_eq!(write_size.unwrap(), content.len() as u32); + + let result = self + .fs + .flush_file(file_handle.file_id, file_handle.handle_id) + .await; + assert!(result.is_ok()); + + self.files.get_mut(&file_handle.file_id).unwrap().size = content.len() as u64; + } + + async fn test_read_file(&self, file_handle: &FileHandle, expected_content: &str) { + let read_data = self + .fs + .read( + file_handle.file_id, + file_handle.handle_id, + 0, + expected_content.len() as u32, + ) + .await; + assert!(read_data.is_ok()); + assert_eq!(read_data.unwrap(), expected_content.as_bytes()); + } + + async fn test_close_file(&self, file_handle: &FileHandle) { + let close_file = self + .fs + .close_file(file_handle.file_id, file_handle.handle_id) + .await; + assert!(close_file.is_ok()); + } + + async fn test_create_dir(&mut self, parent_file_id: u64, name: &OsStr) { + let dir = self.fs.create_dir(parent_file_id, name).await; + assert!(dir.is_ok()); + let dir_file_id = dir.unwrap(); + assert!(dir_file_id >= INITIAL_FILE_ID); + let dir_stat = self.fs.stat(dir_file_id).await; + assert!(dir_stat.is_ok()); + + self.test_stat_file(dir_file_id, &dir_stat.unwrap().path, Directory, 0) + .await; + } + + async fn test_list_dir(&self, root_file_id: u64, check_child: bool) { + let list_dir = self.fs.read_dir(root_file_id).await; + assert!(list_dir.is_ok()); + let list_dir = list_dir.unwrap(); + + if !check_child { + return; + } + for file_stat in list_dir { + assert!(self.files.contains_key(&file_stat.file_id)); + let actual_file_stat = 
self.files.get(&file_stat.file_id).unwrap(); + self.assert_file_stat( + &file_stat, + &actual_file_stat.path, + actual_file_stat.kind, + actual_file_stat.size, + ); + } + } + + async fn test_remove_file(&mut self, root_file_id: u64, name: &OsStr) { + let file_stat = self.fs.lookup(root_file_id, name).await; + assert!(file_stat.is_ok()); + let file_stat = file_stat.unwrap(); + + let remove_file = self.fs.remove_file(root_file_id, name).await; + assert!(remove_file.is_ok()); + self.files.remove(&file_stat.file_id); + + self.test_file_not_found(file_stat.file_id).await; + } + + async fn test_remove_dir(&mut self, root_file_id: u64, name: &OsStr) { + let file_stat = self.fs.lookup(root_file_id, name).await; + assert!(file_stat.is_ok()); + let file_stat = file_stat.unwrap(); + + let remove_dir = self.fs.remove_dir(root_file_id, name).await; + assert!(remove_dir.is_ok()); + self.files.remove(&file_stat.file_id); + + self.test_file_not_found(file_stat.file_id).await; + } + + async fn test_file_not_found(&self, file_id: u64) { + let not_found_file = self.fs.stat(file_id).await; + assert!(not_found_file.is_err()); + } + + fn assert_file_stat(&self, file_stat: &FileStat, path: &Path, kind: FileType, size: u64) { + assert_eq!(file_stat.path, path); + assert_eq!(file_stat.kind, kind); + assert_eq!(file_stat.size, size); + if file_stat.file_id == ROOT_DIR_FILE_ID || file_stat.file_id == FS_META_FILE_ID { + assert_eq!(file_stat.parent_file_id, 1); + } else { + assert!(file_stat.file_id >= INITIAL_FILE_ID); + assert!( + file_stat.parent_file_id == 1 || file_stat.parent_file_id >= INITIAL_FILE_ID + ); + } + } + } + + #[test] + fn test_create_file_stat() { + //test new file + let file_stat = FileStat::new_file_filestat(Path::new("a"), "b".as_ref(), 10); + assert_eq!(file_stat.name, "b"); + assert_eq!(file_stat.path, Path::new("a/b")); + assert_eq!(file_stat.size, 10); + assert_eq!(file_stat.kind, RegularFile); + + //test new dir + let file_stat = 
FileStat::new_dir_filestat("a".as_ref(), "b".as_ref()); + assert_eq!(file_stat.name, "b"); + assert_eq!(file_stat.path, Path::new("a/b")); + assert_eq!(file_stat.size, 0); + assert_eq!(file_stat.kind, Directory); + + //test new file with path + let file_stat = FileStat::new_file_filestat_with_path("a/b".as_ref(), 10); + assert_eq!(file_stat.name, "b"); + assert_eq!(file_stat.path, Path::new("a/b")); + assert_eq!(file_stat.size, 10); + assert_eq!(file_stat.kind, RegularFile); + + //test new dir with path + let file_stat = FileStat::new_dir_filestat_with_path("a/b".as_ref()); + assert_eq!(file_stat.name, "b"); + assert_eq!(file_stat.path, Path::new("a/b")); + assert_eq!(file_stat.size, 0); + assert_eq!(file_stat.kind, Directory); + } + + #[test] + fn test_file_stat_set_file_id() { + let mut file_stat = FileStat::new_file_filestat("a".as_ref(), "b".as_ref(), 10); + file_stat.set_file_id(1, 2); + assert_eq!(file_stat.file_id, 2); + assert_eq!(file_stat.parent_file_id, 1); + } + + #[test] + #[should_panic(expected = "assertion failed: file_id != 0 && parent_file_id != 0")] + fn test_file_stat_set_file_id_panic() { + let mut file_stat = FileStat::new_file_filestat("a".as_ref(), "b".as_ref(), 10); + file_stat.set_file_id(1, 0); + } +} diff --git a/clients/filesystem-fuse/src/fuse_api_handle.rs b/clients/filesystem-fuse/src/fuse_api_handle.rs new file mode 100644 index 00000000000..15679a222bd --- /dev/null +++ b/clients/filesystem-fuse/src/fuse_api_handle.rs @@ -0,0 +1,512 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +use crate::config::AppConfig; +use crate::filesystem::{FileStat, FileSystemContext, RawFileSystem}; +use fuse3::path::prelude::{ReplyData, ReplyOpen, ReplyStatFs, ReplyWrite}; +use fuse3::path::Request; +use fuse3::raw::prelude::{ + FileAttr, ReplyAttr, ReplyCreated, ReplyDirectory, ReplyDirectoryPlus, ReplyEntry, ReplyInit, +}; +use fuse3::raw::reply::{DirectoryEntry, DirectoryEntryPlus}; +use fuse3::raw::Filesystem; +use fuse3::FileType::{Directory, RegularFile}; +use fuse3::{Errno, FileType, Inode, SetAttr, Timestamp}; +use futures_util::stream; +use futures_util::stream::BoxStream; +use futures_util::StreamExt; +use std::ffi::{OsStr, OsString}; +use std::num::NonZeroU32; +use std::time::{Duration, SystemTime}; + +pub(crate) struct FuseApiHandle { + fs: T, + default_ttl: Duration, + fs_context: FileSystemContext, +} + +impl FuseApiHandle { + const DEFAULT_ATTR_TTL: Duration = Duration::from_secs(1); + const DEFAULT_MAX_WRITE_SIZE: u32 = 16 * 1024; + + pub fn new(fs: T, _config: &AppConfig, context: FileSystemContext) -> Self { + Self { + fs: fs, + default_ttl: Self::DEFAULT_ATTR_TTL, + fs_context: context, + } + } + + async fn get_modified_file_stat( + &self, + file_id: u64, + size: Option, + atime: Option, + mtime: Option, + ) -> Result { + let mut file_stat = self.fs.stat(file_id).await?; + + if let Some(size) = size { + file_stat.size = size; + }; + + if let Some(atime) = atime { + file_stat.atime = atime; + }; + + if let Some(mtime) = mtime { + file_stat.mtime = mtime; + }; + + Ok(file_stat) + } +} + +impl Filesystem for 
FuseApiHandle { + async fn init(&self, _req: Request) -> fuse3::Result { + self.fs.init().await?; + Ok(ReplyInit { + max_write: NonZeroU32::new(Self::DEFAULT_MAX_WRITE_SIZE).unwrap(), + }) + } + + async fn destroy(&self, _req: Request) { + //TODO need to call the destroy method of the local_fs + } + + async fn lookup( + &self, + _req: Request, + parent: Inode, + name: &OsStr, + ) -> fuse3::Result { + let file_stat = self.fs.lookup(parent, name).await?; + Ok(ReplyEntry { + ttl: self.default_ttl, + attr: fstat_to_file_attr(&file_stat, &self.fs_context), + generation: 0, + }) + } + + async fn getattr( + &self, + _req: Request, + inode: Inode, + fh: Option, + _flags: u32, + ) -> fuse3::Result { + // check the fh is associated with the file_id + if let Some(fh) = fh { + self.fs.valid_file_handle_id(inode, fh).await?; + } + + let file_stat = self.fs.stat(inode).await?; + Ok(ReplyAttr { + ttl: self.default_ttl, + attr: fstat_to_file_attr(&file_stat, &self.fs_context), + }) + } + + async fn setattr( + &self, + _req: Request, + inode: Inode, + fh: Option, + set_attr: SetAttr, + ) -> fuse3::Result { + // check the fh is associated with the file_id + if let Some(fh) = fh { + self.fs.valid_file_handle_id(inode, fh).await?; + } + + let new_file_stat = self + .get_modified_file_stat(inode, set_attr.size, set_attr.atime, set_attr.mtime) + .await?; + let attr = fstat_to_file_attr(&new_file_stat, &self.fs_context); + self.fs.set_attr(inode, &new_file_stat).await?; + Ok(ReplyAttr { + ttl: self.default_ttl, + attr: attr, + }) + } + + async fn mkdir( + &self, + _req: Request, + parent: Inode, + name: &OsStr, + _mode: u32, + _umask: u32, + ) -> fuse3::Result { + let handle_id = self.fs.create_dir(parent, name).await?; + Ok(ReplyEntry { + ttl: self.default_ttl, + attr: dummy_file_attr( + handle_id, + Directory, + Timestamp::from(SystemTime::now()), + &self.fs_context, + ), + generation: 0, + }) + } + + async fn unlink(&self, _req: Request, parent: Inode, name: &OsStr) -> 
fuse3::Result<()> { + self.fs.remove_file(parent, name).await?; + Ok(()) + } + + async fn rmdir(&self, _req: Request, parent: Inode, name: &OsStr) -> fuse3::Result<()> { + self.fs.remove_dir(parent, name).await?; + Ok(()) + } + + async fn open(&self, _req: Request, inode: Inode, flags: u32) -> fuse3::Result { + let file_handle = self.fs.open_file(inode, flags).await?; + Ok(ReplyOpen { + fh: file_handle.handle_id, + flags: flags, + }) + } + + async fn read( + &self, + _req: Request, + inode: Inode, + fh: u64, + offset: u64, + size: u32, + ) -> fuse3::Result { + let data = self.fs.read(inode, fh, offset, size).await?; + Ok(ReplyData { data: data }) + } + + async fn write( + &self, + _req: Request, + inode: Inode, + fh: u64, + offset: u64, + data: &[u8], + _write_flags: u32, + _flags: u32, + ) -> fuse3::Result { + let written = self.fs.write(inode, fh, offset, data).await?; + Ok(ReplyWrite { written: written }) + } + + async fn statfs(&self, _req: Request, _inode: Inode) -> fuse3::Result { + //TODO: Implement statfs for the filesystem + Ok(ReplyStatFs { + blocks: 1000000, + bfree: 1000000, + bavail: 1000000, + files: 1000000, + ffree: 1000000, + bsize: 4096, + namelen: 255, + frsize: 4096, + }) + } + + async fn release( + &self, + _req: Request, + inode: Inode, + fh: u64, + _flags: u32, + _lock_owner: u64, + _flush: bool, + ) -> fuse3::Result<()> { + self.fs.close_file(inode, fh).await + } + + async fn flush( + &self, + _req: Request, + inode: Inode, + fh: u64, + _lock_owner: u64, + ) -> fuse3::Result<()> { + self.fs.flush_file(inode, fh).await + } + + async fn opendir(&self, _req: Request, inode: Inode, flags: u32) -> fuse3::Result { + let file_handle = self.fs.open_dir(inode, flags).await?; + Ok(ReplyOpen { + fh: file_handle.handle_id, + flags: flags, + }) + } + + type DirEntryStream<'a> + = BoxStream<'a, fuse3::Result> + where + T: 'a; + + #[allow(clippy::needless_lifetimes)] + async fn readdir<'a>( + &'a self, + _req: Request, + parent: Inode, + _fh: u64, + 
offset: i64, + ) -> fuse3::Result>> { + let current = self.fs.stat(parent).await?; + let files = self.fs.read_dir(parent).await?; + let entries_stream = + stream::iter(files.into_iter().enumerate().map(|(index, file_stat)| { + Ok(DirectoryEntry { + inode: file_stat.file_id, + name: file_stat.name.clone(), + kind: file_stat.kind, + offset: (index + 3) as i64, + }) + })); + + let relative_paths = stream::iter([ + Ok(DirectoryEntry { + inode: current.file_id, + name: ".".into(), + kind: Directory, + offset: 1, + }), + Ok(DirectoryEntry { + inode: current.parent_file_id, + name: "..".into(), + kind: Directory, + offset: 2, + }), + ]); + + //TODO Need to improve the read dir operation + let combined_stream = relative_paths.chain(entries_stream); + Ok(ReplyDirectory { + entries: combined_stream.skip(offset as usize).boxed(), + }) + } + + async fn releasedir( + &self, + _req: Request, + inode: Inode, + fh: u64, + _flags: u32, + ) -> fuse3::Result<()> { + self.fs.close_file(inode, fh).await + } + + async fn create( + &self, + _req: Request, + parent: Inode, + name: &OsStr, + _mode: u32, + flags: u32, + ) -> fuse3::Result { + let file_handle = self.fs.create_file(parent, name, flags).await?; + Ok(ReplyCreated { + ttl: self.default_ttl, + attr: dummy_file_attr( + file_handle.file_id, + RegularFile, + Timestamp::from(SystemTime::now()), + &self.fs_context, + ), + generation: 0, + fh: file_handle.handle_id, + flags: flags, + }) + } + + type DirEntryPlusStream<'a> + = BoxStream<'a, fuse3::Result> + where + T: 'a; + + #[allow(clippy::needless_lifetimes)] + async fn readdirplus<'a>( + &'a self, + _req: Request, + parent: Inode, + _fh: u64, + offset: u64, + _lock_owner: u64, + ) -> fuse3::Result>> { + let current = self.fs.stat(parent).await?; + let files = self.fs.read_dir(parent).await?; + let entries_stream = + stream::iter(files.into_iter().enumerate().map(|(index, file_stat)| { + Ok(DirectoryEntryPlus { + inode: file_stat.file_id, + name: file_stat.name.clone(), + kind: 
file_stat.kind, + offset: (index + 3) as i64, + attr: fstat_to_file_attr(&file_stat, &self.fs_context), + generation: 0, + entry_ttl: self.default_ttl, + attr_ttl: self.default_ttl, + }) + })); + + let relative_paths = stream::iter([ + Ok(DirectoryEntryPlus { + inode: current.file_id, + name: OsString::from("."), + kind: Directory, + offset: 1, + attr: fstat_to_file_attr(¤t, &self.fs_context), + generation: 0, + entry_ttl: self.default_ttl, + attr_ttl: self.default_ttl, + }), + Ok(DirectoryEntryPlus { + inode: current.parent_file_id, + name: OsString::from(".."), + kind: Directory, + offset: 2, + attr: dummy_file_attr( + current.parent_file_id, + Directory, + Timestamp::from(SystemTime::now()), + &self.fs_context, + ), + generation: 0, + entry_ttl: self.default_ttl, + attr_ttl: self.default_ttl, + }), + ]); + + //TODO Need to improve the read dir operation + let combined_stream = relative_paths.chain(entries_stream); + Ok(ReplyDirectoryPlus { + entries: combined_stream.skip(offset as usize).boxed(), + }) + } +} + +const fn fstat_to_file_attr(file_st: &FileStat, context: &FileSystemContext) -> FileAttr { + debug_assert!(file_st.file_id != 0 && file_st.parent_file_id != 0); + let perm = match file_st.kind { + Directory => context.default_dir_perm, + _ => context.default_file_perm, + }; + FileAttr { + ino: file_st.file_id, + size: file_st.size, + blocks: (file_st.size + context.block_size as u64 - 1) / context.block_size as u64, + atime: file_st.atime, + mtime: file_st.mtime, + ctime: file_st.ctime, + kind: file_st.kind, + perm: perm, + nlink: file_st.nlink, + uid: context.uid, + gid: context.gid, + rdev: 0, + blksize: context.block_size, + #[cfg(target_os = "macos")] + crtime: file_st.ctime, + #[cfg(target_os = "macos")] + flags: 0, + } +} + +const fn dummy_file_attr( + file_id: u64, + kind: FileType, + now: Timestamp, + context: &FileSystemContext, +) -> FileAttr { + debug_assert!(file_id != 0); + let mode = match kind { + Directory => context.default_dir_perm, + _ 
=> context.default_file_perm, + }; + FileAttr { + ino: file_id, + size: 0, + blocks: 1, + atime: now, + mtime: now, + ctime: now, + kind, + perm: mode, + nlink: 0, + uid: context.uid, + gid: context.gid, + rdev: 0, + blksize: context.block_size, + #[cfg(target_os = "macos")] + crtime: now, + #[cfg(target_os = "macos")] + flags: 0, + } +} + +#[cfg(test)] +mod test { + use crate::filesystem::{FileStat, FileSystemContext}; + use crate::fuse_api_handle::fstat_to_file_attr; + use fuse3::{FileType, Timestamp}; + + #[test] + fn test_fstat_to_file_attr() { + let file_stat = FileStat { + file_id: 1, + parent_file_id: 3, + name: "test".into(), + path: "".into(), + size: 10032, + kind: FileType::RegularFile, + atime: Timestamp { sec: 10, nsec: 3 }, + mtime: Timestamp { sec: 12, nsec: 5 }, + ctime: Timestamp { sec: 15, nsec: 7 }, + nlink: 0, + }; + + let context = FileSystemContext { + uid: 1, + gid: 2, + default_file_perm: 0o644, + default_dir_perm: 0o755, + block_size: 4 * 1024, + }; + + let file_attr = fstat_to_file_attr(&file_stat, &context); + + assert_eq!(file_attr.ino, 1); + assert_eq!(file_attr.size, 10032); + assert_eq!(file_attr.blocks, 3); + assert_eq!(file_attr.atime, Timestamp { sec: 10, nsec: 3 }); + assert_eq!(file_attr.mtime, Timestamp { sec: 12, nsec: 5 }); + assert_eq!(file_attr.ctime, Timestamp { sec: 15, nsec: 7 }); + assert_eq!(file_attr.kind, FileType::RegularFile); + assert_eq!(file_attr.perm, context.default_file_perm); + assert_eq!(file_attr.nlink, 0); + assert_eq!(file_attr.uid, 1); + assert_eq!(file_attr.gid, 2); + assert_eq!(file_attr.rdev, 0); + assert_eq!(file_attr.blksize, 4 * 1024); + #[cfg(target_os = "macos")] + assert_eq!(file_attr.crtime, Timestamp { sec: 15, nsec: 7 }); + #[cfg(target_os = "macos")] + assert_eq!(file_attr.flags, 0); + } +} diff --git a/clients/filesystem-fuse/src/fuse_server.rs b/clients/filesystem-fuse/src/fuse_server.rs new file mode 100644 index 00000000000..a059686e16c --- /dev/null +++ 
b/clients/filesystem-fuse/src/fuse_server.rs @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +use crate::utils::GvfsResult; +use fuse3::raw::{Filesystem, Session}; +use fuse3::MountOptions; +use log::{error, info}; +use std::process::exit; +use std::sync::Arc; +use tokio::select; +use tokio::sync::Notify; + +/// Represents a FUSE server capable of starting and stopping the FUSE filesystem. +pub struct FuseServer { + // Notification for stop + close_notify: Arc, + + // Mount point of the FUSE filesystem + mount_point: String, +} + +impl FuseServer { + /// Creates a new instance of `FuseServer`. + pub fn new(mount_point: &str) -> Self { + Self { + close_notify: Arc::new(Default::default()), + mount_point: mount_point.to_string(), + } + } + + /// Starts the FUSE filesystem and blocks until it is stopped. 
+ pub async fn start(&self, fuse_fs: impl Filesystem + Sync + 'static) -> GvfsResult<()> { + //check if the mount point exists + if !std::path::Path::new(&self.mount_point).exists() { + error!("Mount point {} does not exist", self.mount_point); + exit(libc::ENOENT); + } + + info!( + "Starting FUSE filesystem and mounting at {}", + self.mount_point + ); + + let mount_options = MountOptions::default(); + let mut mount_handle = Session::new(mount_options) + .mount_with_unprivileged(fuse_fs, &self.mount_point) + .await?; + + let handle = &mut mount_handle; + + select! { + res = handle => { + if res.is_err() { + error!("Failed to mount FUSE filesystem: {:?}", res.err()); + } + }, + _ = self.close_notify.notified() => { + if let Err(e) = mount_handle.unmount().await { + error!("Failed to unmount FUSE filesystem: {:?}", e); + } else { + info!("FUSE filesystem unmounted successfully."); + } + } + } + + // notify that the filesystem is stopped + self.close_notify.notify_one(); + Ok(()) + } + + /// Stops the FUSE filesystem. + pub async fn stop(&self) -> GvfsResult<()> { + info!("Stopping FUSE filesystem..."); + self.close_notify.notify_one(); + + // wait for the filesystem to stop + self.close_notify.notified().await; + Ok(()) + } +} diff --git a/clients/filesystem-fuse/src/gravitino_client.rs b/clients/filesystem-fuse/src/gravitino_client.rs new file mode 100644 index 00000000000..9bdfbb2c288 --- /dev/null +++ b/clients/filesystem-fuse/src/gravitino_client.rs @@ -0,0 +1,353 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +use crate::config::GravitinoConfig; +use crate::error::{ErrorCode, GvfsError}; +use reqwest::Client; +use serde::Deserialize; +use std::collections::HashMap; +use std::fmt::Debug; +use urlencoding::encode; + +#[derive(Debug, Deserialize)] +pub(crate) struct Fileset { + pub(crate) name: String, + #[serde(rename = "type")] + pub(crate) fileset_type: String, + comment: String, + #[serde(rename = "storageLocation")] + pub(crate) storage_location: String, + properties: HashMap, +} + +#[derive(Debug, Deserialize)] +struct FilesetResponse { + code: u32, + fileset: Fileset, +} + +#[derive(Debug, Deserialize)] +struct FileLocationResponse { + code: u32, + #[serde(rename = "fileLocation")] + location: String, +} + +#[derive(Debug, Deserialize)] +pub(crate) struct Catalog { + pub(crate) name: String, + #[serde(rename = "type")] + pub(crate) catalog_type: String, + provider: String, + comment: String, + pub(crate) properties: HashMap, +} + +#[derive(Debug, Deserialize)] +struct CatalogResponse { + code: u32, + catalog: Catalog, +} + +pub(crate) struct GravitinoClient { + gravitino_uri: String, + metalake: String, + + client: Client, +} + +impl GravitinoClient { + pub fn new(config: &GravitinoConfig) -> Self { + Self { + gravitino_uri: config.uri.clone(), + metalake: config.metalake.clone(), + client: Client::new(), + } + } + + pub fn init(&self) {} + + pub fn do_post(&self, _path: &str, _data: &str) { + todo!() + } + + pub fn request(&self, _path: &str, _data: &str) -> Result<(), GvfsError> { + todo!() + } + + pub fn list_schema(&self) -> Result<(), 
GvfsError> { + todo!() + } + + pub fn list_fileset(&self) -> Result<(), GvfsError> { + todo!() + } + + fn get_fileset_url(&self, catalog_name: &str, schema_name: &str, fileset_name: &str) -> String { + format!( + "{}/api/metalakes/{}/catalogs/{}/schemas/{}/filesets/{}", + self.gravitino_uri, self.metalake, catalog_name, schema_name, fileset_name + ) + } + + async fn do_get(&self, url: &str) -> Result + where + T: for<'de> Deserialize<'de>, + { + let http_resp = + self.client.get(url).send().await.map_err(|e| { + GvfsError::RestError(format!("Failed to send request to {}", url), e) + })?; + + let res = http_resp.json::().await.map_err(|e| { + GvfsError::RestError(format!("Failed to parse response from {}", url), e) + })?; + + Ok(res) + } + + pub async fn get_catalog_url(&self, catalog_name: &str) -> String { + format!( + "{}/api/metalakes/{}/catalogs/{}", + self.gravitino_uri, self.metalake, catalog_name + ) + } + + pub async fn get_catalog(&self, catalog_name: &str) -> Result { + let url = self.get_catalog_url(catalog_name).await; + let res = self.do_get::(&url).await?; + + if res.code != 0 { + return Err(GvfsError::Error( + ErrorCode::GravitinoClientError, + "Failed to get catalog".to_string(), + )); + } + Ok(res.catalog) + } + + pub async fn get_fileset( + &self, + catalog_name: &str, + schema_name: &str, + fileset_name: &str, + ) -> Result { + let url = self.get_fileset_url(catalog_name, schema_name, fileset_name); + let res = self.do_get::(&url).await?; + + if res.code != 0 { + return Err(GvfsError::Error( + ErrorCode::GravitinoClientError, + "Failed to get fileset".to_string(), + )); + } + Ok(res.fileset) + } + + pub fn get_file_location_url( + &self, + catalog_name: &str, + schema_name: &str, + fileset_name: &str, + path: &str, + ) -> String { + let encoded_path = encode(path); + format!( + "{}/api/metalakes/{}/catalogs/{}/schemas/{}/filesets/{}/location?sub_path={}", + self.gravitino_uri, + self.metalake, + catalog_name, + schema_name, + fileset_name, + 
encoded_path + ) + } + + pub async fn get_file_location( + &self, + catalog_name: &str, + schema_name: &str, + fileset_name: &str, + path: &str, + ) -> Result { + let url = self.get_file_location_url(catalog_name, schema_name, fileset_name, path); + let res = self.do_get::(&url).await?; + + if res.code != 0 { + return Err(GvfsError::Error( + ErrorCode::GravitinoClientError, + "Failed to get file location".to_string(), + )); + } + Ok(res.location) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use mockito::mock; + + #[tokio::test] + async fn test_get_fileset_success() { + let fileset_response = r#" + { + "code": 0, + "fileset": { + "name": "example_fileset", + "type": "example_type", + "comment": "This is a test fileset", + "storageLocation": "/example/path", + "properties": { + "key1": "value1", + "key2": "value2" + } + } + }"#; + + let mock_server_url = &mockito::server_url(); + + let url = format!( + "/api/metalakes/{}/catalogs/{}/schemas/{}/filesets/{}", + "test", "catalog1", "schema1", "fileset1" + ); + let _m = mock("GET", url.as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(fileset_response) + .create(); + + let config = GravitinoConfig { + uri: mock_server_url.to_string(), + metalake: "test".to_string(), + }; + let client = GravitinoClient::new(&config); + + let result = client.get_fileset("catalog1", "schema1", "fileset1").await; + + match result { + Ok(fileset) => { + assert_eq!(fileset.name, "example_fileset"); + assert_eq!(fileset.fileset_type, "example_type"); + assert_eq!(fileset.storage_location, "/example/path"); + assert_eq!(fileset.properties.get("key1"), Some(&"value1".to_string())); + } + Err(e) => panic!("Expected Ok, but got Err: {:?}", e), + } + } + + #[tokio::test] + async fn test_get_file_location_success() { + let file_location_response = r#" + { + "code": 0, + "fileLocation": "/mybucket/a" + }"#; + + let mock_server_url = &mockito::server_url(); + + let url = format!( + 
"/api/metalakes/{}/catalogs/{}/schemas/{}/filesets/{}/location?sub_path={}", + "test", + "catalog1", + "schema1", + "fileset1", + encode("/example/path") + ); + let _m = mock("GET", url.as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(file_location_response) + .create(); + + let config = GravitinoConfig { + uri: mock_server_url.to_string(), + metalake: "test".to_string(), + }; + let client = GravitinoClient::new(&config); + + let result = client + .get_file_location("catalog1", "schema1", "fileset1", "/example/path") + .await; + + match result { + Ok(location) => { + assert_eq!(location, "/mybucket/a"); + } + Err(e) => panic!("Expected Ok, but got Err: {:?}", e), + } + } + + #[tokio::test] + async fn test_get_catalog_success() { + let catalog_response = r#" + { + "code": 0, + "catalog": { + "name": "example_catalog", + "type": "example_type", + "provider": "example_provider", + "comment": "This is a test catalog", + "properties": { + "key1": "value1", + "key2": "value2" + } + } + }"#; + + let mock_server_url = &mockito::server_url(); + + let url = format!("/api/metalakes/{}/catalogs/{}", "test", "catalog1"); + let _m = mock("GET", url.as_str()) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(catalog_response) + .create(); + + let config = GravitinoConfig { + uri: mock_server_url.to_string(), + metalake: "test".to_string(), + }; + let client = GravitinoClient::new(&config); + + let result = client.get_catalog("catalog1").await; + + match result { + Ok(_) => {} + Err(e) => panic!("Expected Ok, but got Err: {:?}", e), + } + } + + async fn get_fileset_example() { + tracing_subscriber::fmt::init(); + let config = GravitinoConfig { + uri: "http://localhost:8090".to_string(), + metalake: "test".to_string(), + }; + let client = GravitinoClient::new(&config); + client.init(); + let result = client.get_fileset("c1", "s1", "fileset1").await; + if let Err(e) = &result { + println!("{:?}", e); + } 
 + + let fileset = result.unwrap(); + println!("{:?}", fileset); + assert_eq!(fileset.name, "fileset1"); + } +} diff --git a/clients/filesystem-fuse/src/gravitino_fileset_filesystem.rs b/clients/filesystem-fuse/src/gravitino_fileset_filesystem.rs new file mode 100644 index 00000000000..7da2f572dcc --- /dev/null +++ b/clients/filesystem-fuse/src/gravitino_fileset_filesystem.rs @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +use crate::config::AppConfig; +use crate::filesystem::{FileStat, FileSystemCapacity, FileSystemContext, PathFileSystem, Result}; +use crate::gravitino_client::GravitinoClient; +use crate::opened_file::{OpenFileFlags, OpenedFile}; +use async_trait::async_trait; +use fuse3::Errno; +use std::path::{Path, PathBuf}; + +/// GravitinoFilesetFileSystem is a filesystem that is associated with a fileset in Gravitino. +/// It maps the fileset path to the original data storage path, and delegates the operation +/// to the inner filesystem like S3, GCS, or JuiceFS. +pub(crate) struct GravitinoFilesetFileSystem { + physical_fs: Box, + client: GravitinoClient, + // location is an absolute path in the physical filesystem that is associated with the fileset. + // e.g.
fileset location : s3://bucket/path/to/file the location is /path/to/file + location: PathBuf, +} + +impl GravitinoFilesetFileSystem { + pub async fn new( + fs: Box, + target_path: &Path, + client: GravitinoClient, + _config: &AppConfig, + _context: &FileSystemContext, + ) -> Self { + Self { + physical_fs: fs, + client: client, + location: target_path.into(), + } + } + + fn gvfs_path_to_raw_path(&self, path: &Path) -> PathBuf { + let relation_path = path.strip_prefix("/").expect("path should start with /"); + if relation_path == Path::new("") { + return self.location.clone(); + } + self.location.join(relation_path) + } + + fn raw_path_to_gvfs_path(&self, path: &Path) -> Result { + let stripped_path = path + .strip_prefix(&self.location) + .map_err(|_| Errno::from(libc::EBADF))?; + let mut result_path = PathBuf::from("/"); + result_path.push(stripped_path); + Ok(result_path) + } +} + +#[async_trait] +impl PathFileSystem for GravitinoFilesetFileSystem { + async fn init(&self) -> Result<()> { + self.physical_fs.init().await + } + + async fn stat(&self, path: &Path) -> Result { + let raw_path = self.gvfs_path_to_raw_path(path); + let mut file_stat = self.physical_fs.stat(&raw_path).await?; + file_stat.path = self.raw_path_to_gvfs_path(&file_stat.path)?; + Ok(file_stat) + } + + async fn read_dir(&self, path: &Path) -> Result> { + let raw_path = self.gvfs_path_to_raw_path(path); + let mut child_filestats = self.physical_fs.read_dir(&raw_path).await?; + for file_stat in child_filestats.iter_mut() { + file_stat.path = self.raw_path_to_gvfs_path(&file_stat.path)?; + } + Ok(child_filestats) + } + + async fn open_file(&self, path: &Path, flags: OpenFileFlags) -> Result { + let raw_path = self.gvfs_path_to_raw_path(path); + let mut opened_file = self.physical_fs.open_file(&raw_path, flags).await?; + opened_file.file_stat.path = self.raw_path_to_gvfs_path(&opened_file.file_stat.path)?; + Ok(opened_file) + } + + async fn open_dir(&self, path: &Path, flags: OpenFileFlags) -> 
Result { + let raw_path = self.gvfs_path_to_raw_path(path); + let mut opened_file = self.physical_fs.open_dir(&raw_path, flags).await?; + opened_file.file_stat.path = self.raw_path_to_gvfs_path(&opened_file.file_stat.path)?; + Ok(opened_file) + } + + async fn create_file(&self, path: &Path, flags: OpenFileFlags) -> Result { + let raw_path = self.gvfs_path_to_raw_path(path); + let mut opened_file = self.physical_fs.create_file(&raw_path, flags).await?; + opened_file.file_stat.path = self.raw_path_to_gvfs_path(&opened_file.file_stat.path)?; + Ok(opened_file) + } + + async fn create_dir(&self, path: &Path) -> Result { + let raw_path = self.gvfs_path_to_raw_path(path); + let mut file_stat = self.physical_fs.create_dir(&raw_path).await?; + file_stat.path = self.raw_path_to_gvfs_path(&file_stat.path)?; + Ok(file_stat) + } + + async fn set_attr(&self, path: &Path, file_stat: &FileStat, flush: bool) -> Result<()> { + let raw_path = self.gvfs_path_to_raw_path(path); + self.physical_fs.set_attr(&raw_path, file_stat, flush).await + } + + async fn remove_file(&self, path: &Path) -> Result<()> { + let raw_path = self.gvfs_path_to_raw_path(path); + self.physical_fs.remove_file(&raw_path).await + } + + async fn remove_dir(&self, path: &Path) -> Result<()> { + let raw_path = self.gvfs_path_to_raw_path(path); + self.physical_fs.remove_dir(&raw_path).await + } + + fn get_capacity(&self) -> Result { + self.physical_fs.get_capacity() + } +} + +#[cfg(test)] +mod tests { + use crate::config::GravitinoConfig; + use crate::gravitino_fileset_filesystem::GravitinoFilesetFileSystem; + use crate::memory_filesystem::MemoryFileSystem; + use std::path::Path; + + #[tokio::test] + async fn test_map_fileset_path_to_raw_path() { + let fs = GravitinoFilesetFileSystem { + physical_fs: Box::new(MemoryFileSystem::new().await), + client: super::GravitinoClient::new(&GravitinoConfig::default()), + location: "/c1/fileset1".into(), + }; + let path = fs.gvfs_path_to_raw_path(Path::new("/a")); + 
assert_eq!(path, Path::new("/c1/fileset1/a")); + let path = fs.gvfs_path_to_raw_path(Path::new("/")); + assert_eq!(path, Path::new("/c1/fileset1")); + } + + #[tokio::test] + async fn test_map_raw_path_to_fileset_path() { + let fs = GravitinoFilesetFileSystem { + physical_fs: Box::new(MemoryFileSystem::new().await), + client: super::GravitinoClient::new(&GravitinoConfig::default()), + location: "/c1/fileset1".into(), + }; + let path = fs + .raw_path_to_gvfs_path(Path::new("/c1/fileset1/a")) + .unwrap(); + assert_eq!(path, Path::new("/a")); + let path = fs.raw_path_to_gvfs_path(Path::new("/c1/fileset1")).unwrap(); + assert_eq!(path, Path::new("/")); + } +} diff --git a/clients/filesystem-fuse/src/gvfs_creator.rs b/clients/filesystem-fuse/src/gvfs_creator.rs new file mode 100644 index 00000000000..aac88ad9d08 --- /dev/null +++ b/clients/filesystem-fuse/src/gvfs_creator.rs @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +use crate::config::AppConfig; +use crate::error::ErrorCode::{InvalidConfig, UnSupportedFilesystem}; +use crate::filesystem::{FileSystemContext, PathFileSystem}; +use crate::gravitino_client::{Catalog, Fileset, GravitinoClient}; +use crate::gravitino_fileset_filesystem::GravitinoFilesetFileSystem; +use crate::gvfs_fuse::{CreateFileSystemResult, FileSystemSchema}; +use crate::s3_filesystem::S3FileSystem; +use crate::utils::{extract_root_path, parse_location, GvfsResult}; + +const GRAVITINO_FILESET_SCHEMA: &str = "gvfs"; + +pub async fn create_gvfs_filesystem( + mount_from: &str, + config: &AppConfig, + fs_context: &FileSystemContext, +) -> GvfsResult { + // Gvfs-fuse filesystem structure: + // FuseApiHandle + // ├─ DefaultRawFileSystem (RawFileSystem) + // │ └─ FileSystemLog (PathFileSystem) + // │ ├─ GravitinoComposedFileSystem (PathFileSystem) + // │ │ ├─ GravitinoFilesetFileSystem (PathFileSystem) + // │ │ │ └─ S3FileSystem (PathFileSystem) + // │ │ │ └─ OpenDALFileSystem (PathFileSystem) + // │ │ ├─ GravitinoFilesetFileSystem (PathFileSystem) + // │ │ │ └─ HDFSFileSystem (PathFileSystem) + // │ │ │ └─ OpenDALFileSystem (PathFileSystem) + // │ │ ├─ GravitinoFilesetFileSystem (PathFileSystem) + // │ │ │ └─ JuiceFileSystem (PathFileSystem) + // │ │ │ └─ NasFileSystem (PathFileSystem) + // │ │ ├─ GravitinoFilesetFileSystem (PathFileSystem) + // │ │ │ └─ XXXFileSystem (PathFileSystem) + // + // `SimpleFileSystem` is a low-level filesystem designed to communicate with FUSE APIs. + // It manages file and directory relationships, as well as file mappings. + // It delegates file operations to the PathFileSystem + // + // `FileSystemLog` is a decorator that adds extra debug logging functionality to file system APIs. + // Similar implementations include permissions, caching, and metrics. + // + // `GravitinoComposeFileSystem` is a composite file system that can combine multiple `GravitinoFilesetFileSystem`. 
 + // It uses the catalog and schema parts of the fileset path to find the actual GravitinoFilesetFileSystem, and delegates the operation to the real storage. + // If the user only mounts a fileset, this layer is not present. There will only be one layer below. + // + // `GravitinoFilesetFileSystem` is a file system that can access a fileset. It translates the fileset path to the real storage path, + // and delegates the operation to the real storage. + // + // `OpenDALFileSystem` is a file system that uses OpenDAL to access real storage. + // It can access S3, HDFS, GCS, Azblob and other storage. + // + // `S3FileSystem` is a file system that uses `OpenDALFileSystem` to access S3 storage. + // + // `HDFSFileSystem` is a file system that uses `OpenDALFileSystem` to access HDFS storage. + // + // `NasFileSystem` is a filesystem that uses a locally accessible path mounted by NAS tools, such as JuiceFS. + // + // `JuiceFileSystem` is a filesystem that uses `NasFileSystem` to access JuiceFS storage. + // + // `XXXFileSystem` is a filesystem that allows you to implement file access through your own extensions.
+ + let client = GravitinoClient::new(&config.gravitino); + + let (catalog_name, schema_name, fileset_name) = extract_fileset(mount_from)?; + let catalog = client.get_catalog(&catalog_name).await?; + if catalog.catalog_type != "fileset" { + return Err(InvalidConfig.to_error(format!("Catalog {} is not a fileset", catalog_name))); + } + let fileset = client + .get_fileset(&catalog_name, &schema_name, &fileset_name) + .await?; + + let inner_fs = create_fs_with_fileset(&catalog, &fileset, config, fs_context)?; + + let target_path = extract_root_path(fileset.storage_location.as_str())?; + let fs = + GravitinoFilesetFileSystem::new(inner_fs, &target_path, client, config, fs_context).await; + Ok(CreateFileSystemResult::Gvfs(fs)) +} + +fn create_fs_with_fileset( + catalog: &Catalog, + fileset: &Fileset, + config: &AppConfig, + fs_context: &FileSystemContext, +) -> GvfsResult> { + let schema = extract_filesystem_scheme(&fileset.storage_location)?; + + match schema { + FileSystemSchema::S3 => Ok(Box::new(S3FileSystem::new( + catalog, fileset, config, fs_context, + )?)), + } +} + +pub fn extract_fileset(path: &str) -> GvfsResult<(String, String, String)> { + let path = parse_location(path)?; + + if path.scheme() != GRAVITINO_FILESET_SCHEMA { + return Err(InvalidConfig.to_error(format!("Invalid fileset schema: {}", path))); + } + + let split = path.path_segments(); + if split.is_none() { + return Err(InvalidConfig.to_error(format!("Invalid fileset path: {}", path))); + } + let split = split.unwrap().collect::>(); + if split.len() != 4 { + return Err(InvalidConfig.to_error(format!("Invalid fileset path: {}", path))); + } + + let catalog = split[1].to_string(); + let schema = split[2].to_string(); + let fileset = split[3].to_string(); + Ok((catalog, schema, fileset)) +} + +pub fn extract_filesystem_scheme(path: &str) -> GvfsResult { + let url = parse_location(path)?; + let scheme = url.scheme(); + + match scheme { + "s3" => Ok(FileSystemSchema::S3), + "s3a" => 
Ok(FileSystemSchema::S3), + _ => Err(UnSupportedFilesystem.to_error(format!("Invalid storage schema: {}", path))), + } +} + +#[cfg(test)] +mod tests { + use crate::gvfs_creator::extract_fileset; + use crate::gvfs_fuse::FileSystemSchema; + + #[test] + fn test_extract_fileset() { + let location = "gvfs://fileset/test/c1/s1/fileset1"; + let (catalog, schema, fileset) = extract_fileset(location).unwrap(); + assert_eq!(catalog, "c1"); + assert_eq!(schema, "s1"); + assert_eq!(fileset, "fileset1"); + } + + #[test] + fn test_extract_schema() { + let location = "s3://bucket/path/to/file"; + let schema = super::extract_filesystem_scheme(location).unwrap(); + assert_eq!(schema, FileSystemSchema::S3); + } +} diff --git a/clients/filesystem-fuse/src/gvfs_fuse.rs b/clients/filesystem-fuse/src/gvfs_fuse.rs new file mode 100644 index 00000000000..88079e99b91 --- /dev/null +++ b/clients/filesystem-fuse/src/gvfs_fuse.rs @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +use crate::config::AppConfig; +use crate::default_raw_filesystem::DefaultRawFileSystem; +use crate::error::ErrorCode::UnSupportedFilesystem; +use crate::filesystem::FileSystemContext; +use crate::fuse_api_handle::FuseApiHandle; +use crate::fuse_server::FuseServer; +use crate::gravitino_fileset_filesystem::GravitinoFilesetFileSystem; +use crate::gvfs_creator::create_gvfs_filesystem; +use crate::memory_filesystem::MemoryFileSystem; +use crate::utils::GvfsResult; +use log::info; +use once_cell::sync::Lazy; +use std::sync::Arc; +use tokio::sync::Mutex; + +static SERVER: Lazy>>> = Lazy::new(|| Mutex::new(None)); + +pub(crate) enum CreateFileSystemResult { + Memory(MemoryFileSystem), + Gvfs(GravitinoFilesetFileSystem), + FuseMemoryFs(FuseApiHandle>), + FuseGvfs(FuseApiHandle>), + None, +} + +#[derive(Debug, PartialEq)] +pub enum FileSystemSchema { + S3, +} + +pub async fn mount(mount_to: &str, mount_from: &str, config: &AppConfig) -> GvfsResult<()> { + info!("Starting gvfs-fuse server..."); + let svr = Arc::new(FuseServer::new(mount_to)); + { + let mut server = SERVER.lock().await; + *server = Some(svr.clone()); + } + let fs = create_fuse_fs(mount_from, config).await?; + match fs { + CreateFileSystemResult::FuseMemoryFs(vfs) => svr.start(vfs).await?, + CreateFileSystemResult::FuseGvfs(vfs) => svr.start(vfs).await?, + _ => return Err(UnSupportedFilesystem.to_error("Unsupported filesystem type".to_string())), + } + Ok(()) +} + +pub async fn unmount() -> GvfsResult<()> { + info!("Stopping gvfs-fuse server..."); + let svr = { + let mut server = SERVER.lock().await; + if server.is_none() { + info!("Server is already stopped."); + return Ok(()); + } + server.take().unwrap() + }; + svr.stop().await +} + +pub(crate) async fn create_fuse_fs( + mount_from: &str, + config: &AppConfig, +) -> GvfsResult { + let uid = unsafe { libc::getuid() }; + let gid = unsafe { libc::getgid() }; + let fs_context = FileSystemContext::new(uid, gid, config); + let fs = 
create_path_fs(mount_from, config, &fs_context).await?; + create_raw_fs(fs, config, fs_context).await +} + +pub async fn create_raw_fs( + path_fs: CreateFileSystemResult, + config: &AppConfig, + fs_context: FileSystemContext, +) -> GvfsResult { + match path_fs { + CreateFileSystemResult::Memory(fs) => { + let fs = FuseApiHandle::new( + DefaultRawFileSystem::new(fs, config, &fs_context), + config, + fs_context, + ); + Ok(CreateFileSystemResult::FuseMemoryFs(fs)) + } + CreateFileSystemResult::Gvfs(fs) => { + let fs = FuseApiHandle::new( + DefaultRawFileSystem::new(fs, config, &fs_context), + config, + fs_context, + ); + Ok(CreateFileSystemResult::FuseGvfs(fs)) + } + _ => Err(UnSupportedFilesystem.to_error("Unsupported filesystem type".to_string())), + } +} + +pub async fn create_path_fs( + mount_from: &str, + config: &AppConfig, + fs_context: &FileSystemContext, +) -> GvfsResult { + if config.fuse.fs_type == "memory" { + Ok(CreateFileSystemResult::Memory( + MemoryFileSystem::new().await, + )) + } else { + create_gvfs_filesystem(mount_from, config, fs_context).await + } +} diff --git a/clients/filesystem-fuse/tests/it.rs b/clients/filesystem-fuse/src/lib.rs similarity index 56% rename from clients/filesystem-fuse/tests/it.rs rename to clients/filesystem-fuse/src/lib.rs index 989e5f9895e..31e7c7fd8e1 100644 --- a/clients/filesystem-fuse/tests/it.rs +++ b/clients/filesystem-fuse/src/lib.rs @@ -16,8 +16,30 @@ * specific language governing permissions and limitations * under the License. 
*/ +use crate::config::AppConfig; +use crate::utils::GvfsResult; -#[test] -fn test_math_add() { - assert_eq!(1, 1); +pub mod config; +mod default_raw_filesystem; +mod error; +mod filesystem; +mod fuse_api_handle; +mod fuse_server; +mod gravitino_client; +mod gravitino_fileset_filesystem; +mod gvfs_creator; +mod gvfs_fuse; +mod memory_filesystem; +mod open_dal_filesystem; +mod opened_file; +mod opened_file_manager; +mod s3_filesystem; +mod utils; + +pub async fn gvfs_mount(mount_to: &str, mount_from: &str, config: &AppConfig) -> GvfsResult<()> { + gvfs_fuse::mount(mount_to, mount_from, config).await +} + +pub async fn gvfs_unmount() -> GvfsResult<()> { + gvfs_fuse::unmount().await } diff --git a/clients/filesystem-fuse/src/main.rs b/clients/filesystem-fuse/src/main.rs index 48b6ab5517e..3534e033465 100644 --- a/clients/filesystem-fuse/src/main.rs +++ b/clients/filesystem-fuse/src/main.rs @@ -16,15 +16,49 @@ * specific language governing permissions and limitations * under the License. */ - -use log::debug; -use log::info; -use std::process::exit; +use fuse3::Errno; +use gvfs_fuse::config::AppConfig; +use gvfs_fuse::{gvfs_mount, gvfs_unmount}; +use log::{error, info}; +use tokio::signal; #[tokio::main] -async fn main() { - tracing_subscriber::fmt().with_env_filter("debug").init(); - info!("Starting filesystem..."); - debug!("Shutdown filesystem..."); - exit(0); +async fn main() -> fuse3::Result<()> { + tracing_subscriber::fmt().init(); + + // todo need inmprove the args parsing + let args: Vec = std::env::args().collect(); + let (mount_point, mount_from, config_path) = match args.len() { + 4 => (args[1].clone(), args[2].clone(), args[3].clone()), + _ => { + error!("Usage: {} ", args[0]); + return Err(Errno::from(libc::EINVAL)); + } + }; + + //todo(read config file from args) + let config = AppConfig::from_file(Some(&config_path)); + if let Err(e) = &config { + error!("Failed to load config: {:?}", e); + return Err(Errno::from(libc::EINVAL)); + } + let config = 
config.unwrap(); + let handle = tokio::spawn(async move { + let result = gvfs_mount(&mount_point, &mount_from, &config).await; + if let Err(e) = result { + error!("Failed to mount gvfs: {:?}", e); + return Err(Errno::from(libc::EINVAL)); + } + Ok(()) + }); + + tokio::select! { + _ = handle => {} + _ = signal::ctrl_c() => { + info!("Received Ctrl+C, unmounting gvfs..."); + } + } + + let _ = gvfs_unmount().await; + Ok(()) } diff --git a/clients/filesystem-fuse/src/memory_filesystem.rs b/clients/filesystem-fuse/src/memory_filesystem.rs new file mode 100644 index 00000000000..f56e65ea33a --- /dev/null +++ b/clients/filesystem-fuse/src/memory_filesystem.rs @@ -0,0 +1,291 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +use crate::filesystem::{ + FileReader, FileStat, FileSystemCapacity, FileWriter, PathFileSystem, Result, +}; +use crate::opened_file::{OpenFileFlags, OpenedFile}; +use async_trait::async_trait; +use bytes::Bytes; +use fuse3::FileType::{Directory, RegularFile}; +use fuse3::{Errno, FileType}; +use std::collections::BTreeMap; +use std::path::{Path, PathBuf}; +use std::sync::{Arc, Mutex, RwLock}; + +// Simple in-memory file implementation of MemoryFileSystem +struct MemoryFile { + kind: FileType, + data: Arc>>, +} + +// MemoryFileSystem is a simple in-memory filesystem implementation +// It is used for testing purposes +pub struct MemoryFileSystem { + // file_map is a map of file name to file size + file_map: RwLock>, +} + +impl MemoryFileSystem { + pub(crate) async fn new() -> Self { + Self { + file_map: RwLock::new(Default::default()), + } + } + + fn create_file_stat(&self, path: &Path, file: &MemoryFile) -> FileStat { + match file.kind { + Directory => FileStat::new_dir_filestat_with_path(path), + _ => { + FileStat::new_file_filestat_with_path(path, file.data.lock().unwrap().len() as u64) + } + } + } +} + +#[async_trait] +impl PathFileSystem for MemoryFileSystem { + async fn init(&self) -> Result<()> { + let root_file = MemoryFile { + kind: Directory, + data: Arc::new(Mutex::new(Vec::new())), + }; + let root_path = PathBuf::from("/"); + self.file_map.write().unwrap().insert(root_path, root_file); + Ok(()) + } + + async fn stat(&self, path: &Path) -> Result { + self.file_map + .read() + .unwrap() + .get(path) + .map(|x| self.create_file_stat(path, x)) + .ok_or(Errno::from(libc::ENOENT)) + } + + async fn read_dir(&self, path: &Path) -> Result> { + let file_map = self.file_map.read().unwrap(); + + let results: Vec = file_map + .iter() + .filter(|x| path_in_dir(path, x.0)) + .map(|(k, v)| self.create_file_stat(k, v)) + .collect(); + + Ok(results) + } + + async fn open_file(&self, path: &Path, _flags: OpenFileFlags) -> Result { + let file_stat = 
self.stat(path).await?; + let mut opened_file = OpenedFile::new(file_stat); + match opened_file.file_stat.kind { + Directory => Ok(opened_file), + RegularFile => { + let data = self + .file_map + .read() + .unwrap() + .get(&opened_file.file_stat.path) + .unwrap() + .data + .clone(); + opened_file.reader = Some(Box::new(MemoryFileReader { data: data.clone() })); + opened_file.writer = Some(Box::new(MemoryFileWriter { data: data })); + Ok(opened_file) + } + _ => Err(Errno::from(libc::EBADF)), + } + } + + async fn open_dir(&self, path: &Path, flags: OpenFileFlags) -> Result { + self.open_file(path, flags).await + } + + async fn create_file(&self, path: &Path, _flags: OpenFileFlags) -> Result { + let mut file_map = self.file_map.write().unwrap(); + if file_map.contains_key(path) { + return Err(Errno::from(libc::EEXIST)); + } + + let mut opened_file = OpenedFile::new(FileStat::new_file_filestat_with_path(path, 0)); + + let data = Arc::new(Mutex::new(Vec::new())); + file_map.insert( + opened_file.file_stat.path.clone(), + MemoryFile { + kind: RegularFile, + data: data.clone(), + }, + ); + + opened_file.reader = Some(Box::new(MemoryFileReader { data: data.clone() })); + opened_file.writer = Some(Box::new(MemoryFileWriter { data: data })); + + Ok(opened_file) + } + + async fn create_dir(&self, path: &Path) -> Result { + let mut file_map = self.file_map.write().unwrap(); + if file_map.contains_key(path) { + return Err(Errno::from(libc::EEXIST)); + } + + let file = FileStat::new_dir_filestat_with_path(path); + file_map.insert( + file.path.clone(), + MemoryFile { + kind: Directory, + data: Arc::new(Mutex::new(Vec::new())), + }, + ); + + Ok(file) + } + + async fn set_attr(&self, _name: &Path, _file_stat: &FileStat, _flush: bool) -> Result<()> { + Ok(()) + } + + async fn remove_file(&self, path: &Path) -> Result<()> { + let mut file_map = self.file_map.write().unwrap(); + if file_map.remove(path).is_none() { + return Err(Errno::from(libc::ENOENT)); + } + Ok(()) + } + + async fn 
remove_dir(&self, path: &Path) -> Result<()> { + let mut file_map = self.file_map.write().unwrap(); + let count = file_map.iter().filter(|x| path_in_dir(path, x.0)).count(); + + if count != 0 { + return Err(Errno::from(libc::ENOTEMPTY)); + } + + if file_map.remove(path).is_none() { + return Err(Errno::from(libc::ENOENT)); + } + Ok(()) + } + + fn get_capacity(&self) -> Result { + Ok(FileSystemCapacity {}) + } +} + +pub(crate) struct MemoryFileReader { + pub(crate) data: Arc>>, +} + +#[async_trait] +impl FileReader for MemoryFileReader { + async fn read(&mut self, offset: u64, size: u32) -> Result { + let v = self.data.lock().unwrap(); + let start = offset as usize; + let end = usize::min(start + size as usize, v.len()); + if start >= v.len() { + return Ok(Bytes::default()); + } + Ok(v[start..end].to_vec().into()) + } +} + +pub(crate) struct MemoryFileWriter { + pub(crate) data: Arc>>, +} + +#[async_trait] +impl FileWriter for MemoryFileWriter { + async fn write(&mut self, offset: u64, data: &[u8]) -> Result { + let mut v = self.data.lock().unwrap(); + let start = offset as usize; + let end = start + data.len(); + + if v.len() < end { + v.resize(end, 0); + } + v[start..end].copy_from_slice(data); + Ok(data.len() as u32) + } +} + +fn path_in_dir(dir: &Path, path: &Path) -> bool { + if let Ok(relative_path) = path.strip_prefix(dir) { + relative_path.components().count() == 1 + } else { + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::AppConfig; + use crate::default_raw_filesystem::DefaultRawFileSystem; + use crate::filesystem::tests::{TestPathFileSystem, TestRawFileSystem}; + use crate::filesystem::{FileSystemContext, RawFileSystem}; + + #[test] + fn test_path_in_dir() { + let dir = Path::new("/parent"); + + let path1 = Path::new("/parent/child1"); + let path2 = Path::new("/parent/a.txt"); + let path3 = Path::new("/parent/child1/grandchild"); + let path4 = Path::new("/other"); + + assert!(!path_in_dir(dir, dir)); + 
assert!(path_in_dir(dir, path1)); + assert!(path_in_dir(dir, path2)); + assert!(!path_in_dir(dir, path3)); + assert!(!path_in_dir(dir, path4)); + + let dir = Path::new("/"); + + let path1 = Path::new("/child1"); + let path2 = Path::new("/a.txt"); + let path3 = Path::new("/child1/grandchild"); + + assert!(!path_in_dir(dir, dir)); + assert!(path_in_dir(dir, path1)); + assert!(path_in_dir(dir, path2)); + assert!(!path_in_dir(dir, path3)); + } + + #[tokio::test] + async fn test_memory_file_system() { + let fs = MemoryFileSystem::new().await; + let _ = fs.init().await; + let mut tester = TestPathFileSystem::new(Path::new("/ab"), fs); + tester.test_path_file_system().await; + } + + #[tokio::test] + async fn test_memory_file_system_with_raw_file_system() { + let memory_fs = MemoryFileSystem::new().await; + let raw_fs = DefaultRawFileSystem::new( + memory_fs, + &AppConfig::default(), + &FileSystemContext::default(), + ); + let _ = raw_fs.init().await; + let mut tester = TestRawFileSystem::new(Path::new("/ab"), raw_fs); + tester.test_raw_file_system().await; + } +} diff --git a/clients/filesystem-fuse/src/open_dal_filesystem.rs b/clients/filesystem-fuse/src/open_dal_filesystem.rs new file mode 100644 index 00000000000..e53fbaf6032 --- /dev/null +++ b/clients/filesystem-fuse/src/open_dal_filesystem.rs @@ -0,0 +1,297 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +use crate::config::AppConfig; +use crate::filesystem::{ + FileReader, FileStat, FileSystemCapacity, FileSystemContext, FileWriter, PathFileSystem, Result, +}; +use crate::opened_file::{OpenFileFlags, OpenedFile}; +use async_trait::async_trait; +use bytes::Bytes; +use fuse3::FileType::{Directory, RegularFile}; +use fuse3::{Errno, FileType, Timestamp}; +use log::error; +use opendal::{EntryMode, ErrorKind, Metadata, Operator}; +use std::path::{Path, PathBuf}; +use std::time::SystemTime; + +pub(crate) struct OpenDalFileSystem { + op: Operator, +} + +impl OpenDalFileSystem {} + +impl OpenDalFileSystem { + pub(crate) fn new(op: Operator, _config: &AppConfig, _fs_context: &FileSystemContext) -> Self { + Self { op: op } + } + + fn opendal_meta_to_file_stat(&self, meta: &Metadata, file_stat: &mut FileStat) { + let now = SystemTime::now(); + let mtime = meta.last_modified().map(|x| x.into()).unwrap_or(now); + + file_stat.size = meta.content_length(); + file_stat.kind = opendal_filemode_to_filetype(meta.mode()); + file_stat.ctime = Timestamp::from(mtime); + file_stat.atime = Timestamp::from(now); + file_stat.mtime = Timestamp::from(mtime); + } +} + +#[async_trait] +impl PathFileSystem for OpenDalFileSystem { + async fn init(&self) -> Result<()> { + Ok(()) + } + + async fn stat(&self, path: &Path) -> Result { + let file_name = path.to_string_lossy().to_string(); + let meta_result = self.op.stat(&file_name).await; + + // path may be a directory, so try to stat it as a directory + let meta = match meta_result { + Ok(meta) => meta, + Err(err) => { + if 
err.kind() == ErrorKind::NotFound { + let dir_name = build_dir_path(path); + self.op + .stat(&dir_name) + .await + .map_err(opendal_error_to_errno)? + } else { + return Err(opendal_error_to_errno(err)); + } + } + }; + + let mut file_stat = FileStat::new_file_filestat_with_path(path, 0); + self.opendal_meta_to_file_stat(&meta, &mut file_stat); + + Ok(file_stat) + } + + async fn read_dir(&self, path: &Path) -> Result> { + // dir name should end with '/' in opendal. + let dir_name = build_dir_path(path); + let entries = self + .op + .list(&dir_name) + .await + .map_err(opendal_error_to_errno)?; + entries + .iter() + .map(|entry| { + let mut path = PathBuf::from(path); + path.push(entry.name()); + + let mut file_stat = FileStat::new_file_filestat_with_path(&path, 0); + self.opendal_meta_to_file_stat(entry.metadata(), &mut file_stat); + Ok(file_stat) + }) + .collect() + } + + async fn open_file(&self, path: &Path, flags: OpenFileFlags) -> Result { + let file_stat = self.stat(path).await?; + debug_assert!(file_stat.kind == RegularFile); + + let mut file = OpenedFile::new(file_stat); + let file_name = path.to_string_lossy().to_string(); + if flags.is_read() { + let reader = self + .op + .reader_with(&file_name) + .await + .map_err(opendal_error_to_errno)?; + file.reader = Some(Box::new(FileReaderImpl { reader })); + } + if flags.is_write() || flags.is_create() || flags.is_append() || flags.is_truncate() { + let writer = self + .op + .writer_with(&file_name) + .await + .map_err(opendal_error_to_errno)?; + file.writer = Some(Box::new(FileWriterImpl { writer })); + } + Ok(file) + } + + async fn open_dir(&self, path: &Path, _flags: OpenFileFlags) -> Result { + let file_stat = self.stat(path).await?; + debug_assert!(file_stat.kind == Directory); + + let opened_file = OpenedFile::new(file_stat); + Ok(opened_file) + } + + async fn create_file(&self, path: &Path, flags: OpenFileFlags) -> Result { + let file_name = path.to_string_lossy().to_string(); + + let mut writer = self + 
.op + .writer_with(&file_name) + .await + .map_err(opendal_error_to_errno)?; + + writer.close().await.map_err(opendal_error_to_errno)?; + + let file = self.open_file(path, flags).await?; + Ok(file) + } + + async fn create_dir(&self, path: &Path) -> Result { + let dir_name = build_dir_path(path); + self.op + .create_dir(&dir_name) + .await + .map_err(opendal_error_to_errno)?; + let file_stat = self.stat(path).await?; + Ok(file_stat) + } + + async fn set_attr(&self, _path: &Path, _file_stat: &FileStat, _flush: bool) -> Result<()> { + // no need to implement + Ok(()) + } + + async fn remove_file(&self, path: &Path) -> Result<()> { + let file_name = path.to_string_lossy().to_string(); + self.op + .remove(vec![file_name]) + .await + .map_err(opendal_error_to_errno) + } + + async fn remove_dir(&self, path: &Path) -> Result<()> { + //todo:: need to consider keeping the behavior of posix remove dir when the dir is not empty + let dir_name = build_dir_path(path); + self.op + .remove(vec![dir_name]) + .await + .map_err(opendal_error_to_errno) + } + + fn get_capacity(&self) -> Result { + Ok(FileSystemCapacity {}) + } +} + +struct FileReaderImpl { + reader: opendal::Reader, +} + +#[async_trait] +impl FileReader for FileReaderImpl { + async fn read(&mut self, offset: u64, size: u32) -> Result { + let end = offset + size as u64; + let v = self + .reader + .read(offset..end) + .await + .map_err(opendal_error_to_errno)?; + Ok(v.to_bytes()) + } +} + +struct FileWriterImpl { + writer: opendal::Writer, +} + +#[async_trait] +impl FileWriter for FileWriterImpl { + async fn write(&mut self, _offset: u64, data: &[u8]) -> Result { + self.writer + .write(data.to_vec()) + .await + .map_err(opendal_error_to_errno)?; + Ok(data.len() as u32) + } + + async fn close(&mut self) -> Result<()> { + self.writer.close().await.map_err(opendal_error_to_errno)?; + Ok(()) + } +} + +fn build_dir_path(path: &Path) -> String { + let mut dir_path = path.to_string_lossy().to_string(); + if 
!dir_path.ends_with('/') { + dir_path.push('/'); + } + dir_path +} + +fn opendal_error_to_errno(err: opendal::Error) -> Errno { + error!("opendal operator error {:?}", err); + match err.kind() { + ErrorKind::Unsupported => Errno::from(libc::EOPNOTSUPP), + ErrorKind::IsADirectory => Errno::from(libc::EISDIR), + ErrorKind::NotFound => Errno::from(libc::ENOENT), + ErrorKind::PermissionDenied => Errno::from(libc::EACCES), + ErrorKind::AlreadyExists => Errno::from(libc::EEXIST), + ErrorKind::NotADirectory => Errno::from(libc::ENOTDIR), + ErrorKind::RateLimited => Errno::from(libc::EBUSY), + _ => Errno::from(libc::ENOENT), + } +} + +fn opendal_filemode_to_filetype(mode: EntryMode) -> FileType { + match mode { + EntryMode::DIR => Directory, + _ => RegularFile, + } +} + +#[cfg(test)] +mod test { + use crate::config::AppConfig; + use crate::s3_filesystem::extract_s3_config; + use opendal::layers::LoggingLayer; + use opendal::{services, Builder, Operator}; + + #[tokio::test] + async fn test_s3_stat() { + let config = AppConfig::from_file(Some("tests/conf/gvfs_fuse_s3.toml")).unwrap(); + let opendal_config = extract_s3_config(&config); + + let builder = services::S3::from_map(opendal_config); + + // Init an operator + let op = Operator::new(builder) + .expect("opendal create failed") + .layer(LoggingLayer::default()) + .finish(); + + let path = "/"; + let list = op.list(path).await; + if let Ok(l) = list { + for i in l { + println!("list result: {:?}", i); + } + } else { + println!("list error: {:?}", list.err()); + } + + let meta = op.stat_with(path).await; + if let Ok(m) = meta { + println!("stat result: {:?}", m); + } else { + println!("stat error: {:?}", meta.err()); + } + } +} diff --git a/clients/filesystem-fuse/src/opened_file.rs b/clients/filesystem-fuse/src/opened_file.rs new file mode 100644 index 00000000000..0c630e07217 --- /dev/null +++ b/clients/filesystem-fuse/src/opened_file.rs @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under 
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +use crate::filesystem::{FileReader, FileStat, FileWriter, Result}; +use bytes::Bytes; +use fuse3::{Errno, Timestamp}; +use std::time::SystemTime; + +/// Opened file for read or write, it is used to read or write the file content. +pub(crate) struct OpenedFile { + pub(crate) file_stat: FileStat, + + pub(crate) handle_id: u64, + + pub reader: Option>, + + pub writer: Option>, +} + +impl OpenedFile { + pub(crate) fn new(file_stat: FileStat) -> Self { + OpenedFile { + file_stat: file_stat, + handle_id: 0, + reader: None, + writer: None, + } + } + + pub(crate) async fn read(&mut self, offset: u64, size: u32) -> Result { + let reader = self.reader.as_mut().ok_or(Errno::from(libc::EBADF))?; + let result = reader.read(offset, size).await?; + + // update the atime + self.file_stat.atime = Timestamp::from(SystemTime::now()); + + Ok(result) + } + + pub(crate) async fn write(&mut self, offset: u64, data: &[u8]) -> Result { + let writer = self.writer.as_mut().ok_or(Errno::from(libc::EBADF))?; + let written = writer.write(offset, data).await?; + + // update the file size ,mtime and atime + let end = offset + written as u64; + if end > self.file_stat.size { + self.file_stat.size = end; + } + self.file_stat.atime = 
Timestamp::from(SystemTime::now()); + self.file_stat.mtime = self.file_stat.atime; + + Ok(written) + } + + pub(crate) async fn close(&mut self) -> Result<()> { + let mut errors = Vec::new(); + if let Some(mut reader) = self.reader.take() { + if let Err(e) = reader.close().await { + errors.push(e); + } + } + + if let Some(mut writer) = self.writer.take() { + if let Err(e) = self.flush().await { + errors.push(e); + } + if let Err(e) = writer.close().await { + errors.push(e); + } + } + + if !errors.is_empty() { + return Err(errors.remove(0)); + } + Ok(()) + } + + pub(crate) async fn flush(&mut self) -> Result<()> { + if let Some(writer) = &mut self.writer { + writer.flush().await?; + } + Ok(()) + } + + pub(crate) fn file_handle(&self) -> FileHandle { + debug_assert!(self.handle_id != 0); + debug_assert!(self.file_stat.file_id != 0); + FileHandle { + file_id: self.file_stat.file_id, + handle_id: self.handle_id, + } + } + + pub(crate) fn set_file_id(&mut self, parent_file_id: u64, file_id: u64) { + debug_assert!(file_id != 0 && parent_file_id != 0); + self.file_stat.set_file_id(parent_file_id, file_id) + } +} + +// FileHandle is the file handle for the opened file. +pub(crate) struct FileHandle { + pub(crate) file_id: u64, + + pub(crate) handle_id: u64, +} + +// OpenFileFlags is the open file flags for the file system. 
+pub(crate) struct OpenFileFlags(pub(crate) u32); + +impl OpenFileFlags { + pub fn is_read(&self) -> bool { + (self.0 & libc::O_WRONLY as u32) == 0 + } + + pub fn is_write(&self) -> bool { + (self.0 & libc::O_WRONLY as u32) != 0 || (self.0 & libc::O_RDWR as u32) != 0 + } + + pub fn is_append(&self) -> bool { + (self.0 & libc::O_APPEND as u32) != 0 + } + + pub fn is_create(&self) -> bool { + (self.0 & libc::O_CREAT as u32) != 0 + } + + pub fn is_truncate(&self) -> bool { + (self.0 & libc::O_TRUNC as u32) != 0 + } + + pub fn is_exclusive(&self) -> bool { + (self.0 & libc::O_EXCL as u32) != 0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::filesystem::FileStat; + use std::path::Path; + + #[test] + fn test_open_file() { + let mut open_file = OpenedFile::new(FileStat::new_file_filestat( + Path::new("a"), + "b".as_ref(), + 10, + )); + assert_eq!(open_file.file_stat.name, "b"); + assert_eq!(open_file.file_stat.size, 10); + + open_file.set_file_id(1, 2); + + assert_eq!(open_file.file_stat.file_id, 2); + assert_eq!(open_file.file_stat.parent_file_id, 1); + } +} diff --git a/clients/filesystem-fuse/src/opened_file_manager.rs b/clients/filesystem-fuse/src/opened_file_manager.rs new file mode 100644 index 00000000000..ab6a5d82347 --- /dev/null +++ b/clients/filesystem-fuse/src/opened_file_manager.rs @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +use crate::opened_file::OpenedFile; +use dashmap::DashMap; +use std::sync::atomic::AtomicU64; +use std::sync::Arc; +use tokio::sync::Mutex; + +// OpenedFileManager is a manager all the opened files. and allocate a file handle id for the opened file. +pub(crate) struct OpenedFileManager { + // file_handle_map is a map of file_handle_id to opened file. + file_handle_map: DashMap>>, + + // file_handle_id_generator is used to generate unique file handle IDs. + handle_id_generator: AtomicU64, +} + +impl OpenedFileManager { + pub fn new() -> Self { + Self { + file_handle_map: Default::default(), + handle_id_generator: AtomicU64::new(1), + } + } + + pub(crate) fn next_handle_id(&self) -> u64 { + self.handle_id_generator + .fetch_add(1, std::sync::atomic::Ordering::SeqCst) + } + + pub(crate) fn put(&self, mut file: OpenedFile) -> Arc> { + // Put the file into the file handle map, and allocate a file handle id for the file. 
+ let file_handle_id = self.next_handle_id(); + file.handle_id = file_handle_id; + let file_handle = Arc::new(Mutex::new(file)); + self.file_handle_map + .insert(file_handle_id, file_handle.clone()); + file_handle + } + + pub(crate) fn get(&self, handle_id: u64) -> Option>> { + self.file_handle_map + .get(&handle_id) + .map(|x| x.value().clone()) + } + + pub(crate) fn remove(&self, handle_id: u64) -> Option>> { + self.file_handle_map.remove(&handle_id).map(|x| x.1) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::filesystem::FileStat; + use std::path::Path; + + #[tokio::test] + async fn test_opened_file_manager() { + let manager = OpenedFileManager::new(); + + let file1_stat = FileStat::new_file_filestat(Path::new(""), "a.txt".as_ref(), 13); + let file2_stat = FileStat::new_file_filestat(Path::new(""), "b.txt".as_ref(), 18); + + let file1 = OpenedFile::new(file1_stat.clone()); + let file2 = OpenedFile::new(file2_stat.clone()); + + let handle_id1 = manager.put(file1).lock().await.handle_id; + let handle_id2 = manager.put(file2).lock().await.handle_id; + + // Test the file handle id is assigned. 
+ assert!(handle_id1 > 0 && handle_id2 > 0); + assert_ne!(handle_id1, handle_id2); + + // test get file by handle id + assert_eq!( + manager.get(handle_id1).unwrap().lock().await.file_stat.name, + file1_stat.name + ); + + assert_eq!( + manager.get(handle_id2).unwrap().lock().await.file_stat.name, + file2_stat.name + ); + + // test remove file by handle id + assert_eq!( + manager.remove(handle_id1).unwrap().lock().await.handle_id, + handle_id1 + ); + + // test get file by handle id after remove + assert!(manager.get(handle_id1).is_none()); + assert!(manager.get(handle_id2).is_some()); + } +} diff --git a/clients/filesystem-fuse/src/s3_filesystem.rs b/clients/filesystem-fuse/src/s3_filesystem.rs new file mode 100644 index 00000000000..e0ca69b4ccf --- /dev/null +++ b/clients/filesystem-fuse/src/s3_filesystem.rs @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +use crate::config::AppConfig; +use crate::error::ErrorCode::{InvalidConfig, OpenDalError}; +use crate::filesystem::{FileStat, FileSystemCapacity, FileSystemContext, PathFileSystem, Result}; +use crate::gravitino_client::{Catalog, Fileset}; +use crate::open_dal_filesystem::OpenDalFileSystem; +use crate::opened_file::{OpenFileFlags, OpenedFile}; +use crate::utils::{parse_location, GvfsResult}; +use async_trait::async_trait; +use log::error; +use opendal::layers::LoggingLayer; +use opendal::services::S3; +use opendal::{Builder, Operator}; +use std::collections::HashMap; +use std::path::Path; + +pub(crate) struct S3FileSystem { + open_dal_fs: OpenDalFileSystem, +} + +impl S3FileSystem {} + +impl S3FileSystem { + const S3_CONFIG_PREFIX: &'static str = "s3-"; + + pub(crate) fn new( + catalog: &Catalog, + fileset: &Fileset, + config: &AppConfig, + _fs_context: &FileSystemContext, + ) -> GvfsResult { + let mut opendal_config = extract_s3_config(config); + let bucket = extract_bucket(&fileset.storage_location)?; + opendal_config.insert("bucket".to_string(), bucket); + + let region = Self::get_s3_region(catalog)?; + opendal_config.insert("region".to_string(), region); + + let builder = S3::from_map(opendal_config); + + let op = Operator::new(builder); + if let Err(e) = op { + error!("opendal create failed: {:?}", e); + return Err(OpenDalError.to_error(format!("opendal create failed: {:?}", e))); + } + let op = op.unwrap().layer(LoggingLayer::default()).finish(); + let open_dal_fs = OpenDalFileSystem::new(op, config, _fs_context); + Ok(Self { + open_dal_fs: open_dal_fs, + }) + } + + fn get_s3_region(catalog: &Catalog) -> GvfsResult { + if let Some(region) = catalog.properties.get("s3-region") { + Ok(region.clone()) + } else if let Some(endpoint) = catalog.properties.get("s3-endpoint") { + extract_region(endpoint) + } else { + Err(InvalidConfig.to_error(format!( + "Cant not retrieve region in the Catalog {}", + catalog.name + ))) + } + } +} + +#[async_trait] +impl 
PathFileSystem for S3FileSystem { + async fn init(&self) -> Result<()> { + Ok(()) + } + + async fn stat(&self, path: &Path) -> Result { + self.open_dal_fs.stat(path).await + } + + async fn read_dir(&self, path: &Path) -> Result> { + self.open_dal_fs.read_dir(path).await + } + + async fn open_file(&self, path: &Path, flags: OpenFileFlags) -> Result { + self.open_dal_fs.open_file(path, flags).await + } + + async fn open_dir(&self, path: &Path, flags: OpenFileFlags) -> Result { + self.open_dal_fs.open_dir(path, flags).await + } + + async fn create_file(&self, path: &Path, flags: OpenFileFlags) -> Result { + self.open_dal_fs.create_file(path, flags).await + } + + async fn create_dir(&self, path: &Path) -> Result { + self.open_dal_fs.create_dir(path).await + } + + async fn set_attr(&self, path: &Path, file_stat: &FileStat, flush: bool) -> Result<()> { + self.open_dal_fs.set_attr(path, file_stat, flush).await + } + + async fn remove_file(&self, path: &Path) -> Result<()> { + self.open_dal_fs.remove_file(path).await + } + + async fn remove_dir(&self, path: &Path) -> Result<()> { + self.open_dal_fs.remove_dir(path).await + } + + fn get_capacity(&self) -> Result { + self.open_dal_fs.get_capacity() + } +} + +pub(crate) fn extract_bucket(location: &str) -> GvfsResult { + let url = parse_location(location)?; + match url.host_str() { + Some(host) => Ok(host.to_string()), + None => Err(InvalidConfig.to_error(format!( + "Invalid fileset location without bucket: {}", + location + ))), + } +} + +pub(crate) fn extract_region(location: &str) -> GvfsResult { + let url = parse_location(location)?; + match url.host_str() { + Some(host) => { + let parts: Vec<&str> = host.split('.').collect(); + if parts.len() > 1 { + Ok(parts[1].to_string()) + } else { + Err(InvalidConfig.to_error(format!( + "Invalid location: expected region in host, got {}", + location + ))) + } + } + None => Err(InvalidConfig.to_error(format!( + "Invalid fileset location without bucket: {}", + location + ))), + } +} + 
+pub fn extract_s3_config(config: &AppConfig) -> HashMap { + config + .extend_config + .clone() + .into_iter() + .filter_map(|(k, v)| { + if k.starts_with(S3FileSystem::S3_CONFIG_PREFIX) { + Some(( + k.strip_prefix(S3FileSystem::S3_CONFIG_PREFIX) + .unwrap() + .to_string(), + v, + )) + } else { + None + } + }) + .collect() +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::default_raw_filesystem::DefaultRawFileSystem; + use crate::filesystem::tests::{TestPathFileSystem, TestRawFileSystem}; + use crate::filesystem::RawFileSystem; + use opendal::layers::TimeoutLayer; + use std::time::Duration; + + #[test] + fn test_extract_bucket() { + let location = "s3://bucket/path/to/file"; + let result = extract_bucket(location); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), "bucket"); + } + + #[test] + fn test_extract_region() { + let location = "http://s3.ap-southeast-2.amazonaws.com"; + let result = extract_region(location); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), "ap-southeast-2"); + } + + async fn delete_dir(op: &Operator, dir_name: &str) { + let childs = op.list(dir_name).await.expect("list dir failed"); + for child in childs { + let child_name = dir_name.to_string() + child.name(); + if child.metadata().is_dir() { + Box::pin(delete_dir(op, &child_name)).await; + } else { + op.delete(&child_name).await.expect("delete file failed"); + } + } + op.delete(dir_name).await.expect("delete dir failed"); + } + + async fn create_s3_fs(cwd: &Path) -> S3FileSystem { + let config = AppConfig::from_file(Some("tests/conf/gvfs_fuse_s3.toml")).unwrap(); + let opendal_config = extract_s3_config(&config); + + let fs_context = FileSystemContext::default(); + + let builder = S3::from_map(opendal_config); + let op = Operator::new(builder) + .expect("opendal create failed") + .layer(LoggingLayer::default()) + .layer( + TimeoutLayer::new() + .with_timeout(Duration::from_secs(300)) + .with_io_timeout(Duration::from_secs(300)), + ) + .finish(); + + // clean up 
the test directory + let file_name = cwd.to_string_lossy().to_string() + "/"; + delete_dir(&op, &file_name).await; + op.create_dir(&file_name) + .await + .expect("create test dir failed"); + + let open_dal_fs = OpenDalFileSystem::new(op, &config, &fs_context); + S3FileSystem { open_dal_fs } + } + + #[tokio::test] + async fn test_s3_file_system() { + if std::env::var("RUN_S3_TESTS").is_err() { + return; + } + let cwd = Path::new("/gvfs_test1"); + let fs = create_s3_fs(cwd).await; + + let _ = fs.init().await; + let mut tester = TestPathFileSystem::new(cwd, fs); + tester.test_path_file_system().await; + } + + #[tokio::test] + async fn test_s3_file_system_with_raw_file_system() { + if std::env::var("RUN_S3_TESTS").is_err() { + return; + } + + let cwd = Path::new("/gvfs_test2"); + let s3_fs = create_s3_fs(cwd).await; + let raw_fs = + DefaultRawFileSystem::new(s3_fs, &AppConfig::default(), &FileSystemContext::default()); + let _ = raw_fs.init().await; + let mut tester = TestRawFileSystem::new(cwd, raw_fs); + tester.test_raw_file_system().await; + } +} diff --git a/clients/filesystem-fuse/src/utils.rs b/clients/filesystem-fuse/src/utils.rs new file mode 100644 index 00000000000..53eb9179d71 --- /dev/null +++ b/clients/filesystem-fuse/src/utils.rs @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +use crate::error::ErrorCode::InvalidConfig; +use crate::error::GvfsError; +use reqwest::Url; +use std::path::PathBuf; + +pub type GvfsResult = Result; + +pub(crate) fn parse_location(location: &str) -> GvfsResult { + let parsed_url = Url::parse(location); + if let Err(e) = parsed_url { + return Err(InvalidConfig.to_error(format!("Invalid fileset location: {}", e))); + } + Ok(parsed_url.unwrap()) +} + +pub(crate) fn extract_root_path(location: &str) -> GvfsResult { + let url = parse_location(location)?; + Ok(PathBuf::from(url.path())) +} + +#[cfg(test)] +mod tests { + use crate::utils::extract_root_path; + use std::path::PathBuf; + + #[test] + fn test_extract_root_path() { + let location = "s3://bucket/path/to/file"; + let result = extract_root_path(location); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), PathBuf::from("/path/to/file")); + } +} diff --git a/clients/filesystem-fuse/tests/conf/config_test.toml b/clients/filesystem-fuse/tests/conf/config_test.toml new file mode 100644 index 00000000000..524e0aa94fb --- /dev/null +++ b/clients/filesystem-fuse/tests/conf/config_test.toml @@ -0,0 +1,40 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +# fuse settings +[fuse] +file_mask= 0o644 +dir_mask= 0o755 +fs_type = "memory" + +[fuse.properties] +key1 = "value1" +key2 = "value2" + +# filesystem settings +[filesystem] +block_size = 8192 + +# Gravitino settings +[gravitino] +uri = "http://localhost:8090" +metalake = "test" + +# extend settings +[extend_config] +s3-access_key_id = "XXX_access_key" +s3-secret_access_key = "XXX_secret_key" diff --git a/clients/filesystem-fuse/tests/conf/gvfs_fuse_memory.toml b/clients/filesystem-fuse/tests/conf/gvfs_fuse_memory.toml new file mode 100644 index 00000000000..0ec447cd087 --- /dev/null +++ b/clients/filesystem-fuse/tests/conf/gvfs_fuse_memory.toml @@ -0,0 +1,40 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# fuse settings +[fuse] +file_mask= 0o600 +dir_mask= 0o700 +fs_type = "memory" + +[fuse.properties] +key1 = "value1" +key2 = "value2" + +# filesystem settings +[filesystem] +block_size = 8192 + +# Gravitino settings +[gravitino] +uri = "http://localhost:8090" +metalake = "test" + +# extend settings +[extend_config] +s3-access_key_id = "XXX_access_key" +s3-secret_access_key = "XXX_secret_key" diff --git a/clients/filesystem-fuse/tests/conf/gvfs_fuse_s3.toml b/clients/filesystem-fuse/tests/conf/gvfs_fuse_s3.toml new file mode 100644 index 00000000000..7d182cd40df --- /dev/null +++ b/clients/filesystem-fuse/tests/conf/gvfs_fuse_s3.toml @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# fuse settings +[fuse] +file_mask= 0o600 +dir_mask= 0o700 +fs_type = "memory" + +[fuse.properties] +key1 = "value1" +key2 = "value2" + +# filesystem settings +[filesystem] +block_size = 8192 + +# Gravitino settings +[gravitino] +uri = "http://localhost:8090" +metalake = "test" + +# extend settings +[extend_config] +s3-access_key_id = "XXX_access_key" +s3-secret_access_key = "XXX_secret_key" +s3-region = "XXX_region" +s3-bucket = "XXX_bucket" + diff --git a/clients/filesystem-fuse/tests/fuse_test.rs b/clients/filesystem-fuse/tests/fuse_test.rs new file mode 100644 index 00000000000..d06199d782e --- /dev/null +++ b/clients/filesystem-fuse/tests/fuse_test.rs @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +use fuse3::Errno; +use gvfs_fuse::config::AppConfig; +use gvfs_fuse::{gvfs_mount, gvfs_unmount}; +use log::{error, info}; +use std::fs::File; +use std::path::Path; +use std::sync::Arc; +use std::thread::sleep; +use std::time::{Duration, Instant}; +use std::{fs, panic, process}; +use tokio::runtime::Runtime; +use tokio::task::JoinHandle; + +struct FuseTest { + runtime: Arc, + mount_point: String, + gvfs_mount: Option>>, +} + +impl FuseTest { + pub fn setup(&mut self) { + info!("Start gvfs fuse server"); + let mount_point = self.mount_point.clone(); + + let config = AppConfig::from_file(Some("tests/conf/gvfs_fuse_memory.toml")) + .expect("Failed to load config"); + self.runtime.spawn(async move { + let result = gvfs_mount(&mount_point, "", &config).await; + if let Err(e) = result { + error!("Failed to mount gvfs: {:?}", e); + return Err(Errno::from(libc::EINVAL)); + } + Ok(()) + }); + let success = Self::wait_for_fuse_server_ready(&self.mount_point, Duration::from_secs(15)); + assert!(success, "Fuse server cannot start up at 15 seconds"); + } + + pub fn shutdown(&mut self) { + self.runtime.block_on(async { + let _ = gvfs_unmount().await; + }); + } + + fn wait_for_fuse_server_ready(path: &str, timeout: Duration) -> bool { + let test_file = format!("{}/.gvfs_meta", path); + let start_time = Instant::now(); + + while start_time.elapsed() < timeout { + if file_exists(&test_file) { + info!("Fuse server is ready",); + return true; + } + info!("Wait for fuse server ready",); + sleep(Duration::from_secs(1)); + } + false + } +} + +impl Drop for FuseTest { + fn drop(&mut self) { + info!("Shutdown fuse server"); + self.shutdown(); + } +} + +#[test] +fn test_fuse_system_with_auto() { + tracing_subscriber::fmt().init(); + + panic::set_hook(Box::new(|info| { + error!("A panic occurred: {:?}", info); + process::exit(1); + })); + + let mount_point = "target/gvfs"; + let _ = fs::create_dir_all(mount_point); + + let mut test = FuseTest { + runtime: 
Arc::new(Runtime::new().unwrap()), + mount_point: mount_point.to_string(), + gvfs_mount: None, + }; + + test.setup(); + test_fuse_filesystem(mount_point); +} + +fn test_fuse_system_with_manual() { + test_fuse_filesystem("build/gvfs"); +} + +fn test_fuse_filesystem(mount_point: &str) { + info!("Test startup"); + let base_path = Path::new(mount_point); + + //test create file + let test_file = base_path.join("test_create"); + let file = File::create(&test_file).expect("Failed to create file"); + assert!(file.metadata().is_ok(), "Failed to get file metadata"); + assert!(file_exists(&test_file)); + + //test write file + fs::write(&test_file, "read test").expect("Failed to write file"); + + //test read file + let content = fs::read_to_string(test_file.clone()).expect("Failed to read file"); + assert_eq!(content, "read test", "File content mismatch"); + + //test delete file + fs::remove_file(test_file.clone()).expect("Failed to delete file"); + assert!(!file_exists(test_file)); + + //test create directory + let test_dir = base_path.join("test_dir"); + fs::create_dir(&test_dir).expect("Failed to create directory"); + + //test create file in directory + let test_file = base_path.join("test_dir/test_file"); + let file = File::create(&test_file).expect("Failed to create file"); + assert!(file.metadata().is_ok(), "Failed to get file metadata"); + + //test write file in directory + let test_file = base_path.join("test_dir/test_read"); + fs::write(&test_file, "read test").expect("Failed to write file"); + + //test read file in directory + let content = fs::read_to_string(&test_file).expect("Failed to read file"); + assert_eq!(content, "read test", "File content mismatch"); + + //test delete file in directory + fs::remove_file(&test_file).expect("Failed to delete file"); + assert!(!file_exists(&test_file)); + + //test delete directory + fs::remove_dir_all(&test_dir).expect("Failed to delete directory"); + assert!(!file_exists(&test_dir)); + + info!("Success test"); +} + +fn 
file_exists>(path: P) -> bool { + fs::metadata(path).is_ok() +} diff --git a/common/src/main/java/org/apache/gravitino/dto/requests/RoleCreateRequest.java b/common/src/main/java/org/apache/gravitino/dto/requests/RoleCreateRequest.java index 9d85c0c6e14..0466d7d3105 100644 --- a/common/src/main/java/org/apache/gravitino/dto/requests/RoleCreateRequest.java +++ b/common/src/main/java/org/apache/gravitino/dto/requests/RoleCreateRequest.java @@ -79,5 +79,14 @@ public void validate() throws IllegalArgumentException { Preconditions.checkArgument( StringUtils.isNotBlank(name), "\"name\" field is required and cannot be empty"); Preconditions.checkArgument(securableObjects != null, "\"securableObjects\" can't null "); + for (SecurableObjectDTO objectDTO : securableObjects) { + Preconditions.checkArgument( + StringUtils.isNotBlank(objectDTO.name()), "\" securable object name\" can't be blank"); + Preconditions.checkArgument( + objectDTO.type() != null, "\" securable object type\" can't be null"); + Preconditions.checkArgument( + objectDTO.privileges() != null && !objectDTO.privileges().isEmpty(), + "\"securable object privileges\" can't be null or empty"); + } } } diff --git a/core/src/main/java/org/apache/gravitino/catalog/SchemaOperationDispatcher.java b/core/src/main/java/org/apache/gravitino/catalog/SchemaOperationDispatcher.java index 789e5e47155..8f36ce0d957 100644 --- a/core/src/main/java/org/apache/gravitino/catalog/SchemaOperationDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/catalog/SchemaOperationDispatcher.java @@ -277,36 +277,40 @@ public Schema alterSchema(NameIdentifier ident, SchemaChange... 
changes) @Override public boolean dropSchema(NameIdentifier ident, boolean cascade) throws NonEmptySchemaException { NameIdentifier catalogIdent = getCatalogIdentifier(ident); - boolean droppedFromCatalog = - doWithCatalog( - catalogIdent, - c -> c.doWithSchemaOps(s -> s.dropSchema(ident, cascade)), - NonEmptySchemaException.class, - RuntimeException.class); - - // For managed schema, we don't need to drop the schema from the store again. - boolean isManagedSchema = isManagedEntity(catalogIdent, Capability.Scope.SCHEMA); - if (isManagedSchema) { - return droppedFromCatalog; - } - - // For unmanaged schema, it could happen that the schema: - // 1. Is not found in the catalog (dropped directly from underlying sources) - // 2. Is found in the catalog but not in the store (not managed by Gravitino) - // 3. Is found in the catalog and the store (managed by Gravitino) - // 4. Neither found in the catalog nor in the store. - // In all situations, we try to delete the schema from the store, but we don't take the - // return value of the store operation into account. We only take the return value of the - // catalog into account. - try { - store.delete(ident, SCHEMA, cascade); - } catch (NoSuchEntityException e) { - LOG.warn("The schema to be dropped does not exist in the store: {}", ident, e); - } catch (Exception e) { - throw new RuntimeException(e); - } - - return droppedFromCatalog; + return TreeLockUtils.doWithTreeLock( + catalogIdent, + LockType.WRITE, + () -> { + boolean droppedFromCatalog = + doWithCatalog( + catalogIdent, + c -> c.doWithSchemaOps(s -> s.dropSchema(ident, cascade)), + NonEmptySchemaException.class, + RuntimeException.class); + + // For managed schema, we don't need to drop the schema from the store again. + boolean isManagedSchema = isManagedEntity(catalogIdent, Capability.Scope.SCHEMA); + if (isManagedSchema) { + return droppedFromCatalog; + } + + // For unmanaged schema, it could happen that the schema: + // 1. 
Is not found in the catalog (dropped directly from underlying sources) + // 2. Is found in the catalog but not in the store (not managed by Gravitino) + // 3. Is found in the catalog and the store (managed by Gravitino) + // 4. Neither found in the catalog nor in the store. + // In all situations, we try to delete the schema from the store, but we don't take the + // return value of the store operation into account. We only take the return value of the + // catalog into account. + try { + store.delete(ident, SCHEMA, cascade); + } catch (NoSuchEntityException e) { + LOG.warn("The schema to be dropped does not exist in the store: {}", ident, e); + } catch (Exception e) { + throw new RuntimeException(e); + } + return droppedFromCatalog; + }); } private void importSchema(NameIdentifier identifier) { diff --git a/core/src/main/java/org/apache/gravitino/catalog/TableOperationDispatcher.java b/core/src/main/java/org/apache/gravitino/catalog/TableOperationDispatcher.java index 7a4c5a5655b..3e6aa2abbef 100644 --- a/core/src/main/java/org/apache/gravitino/catalog/TableOperationDispatcher.java +++ b/core/src/main/java/org/apache/gravitino/catalog/TableOperationDispatcher.java @@ -62,6 +62,7 @@ import org.apache.gravitino.rel.indexes.Index; import org.apache.gravitino.rel.indexes.Indexes; import org.apache.gravitino.storage.IdGenerator; +import org.apache.gravitino.utils.NameIdentifierUtil; import org.apache.gravitino.utils.PrincipalUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -269,33 +270,41 @@ public Table alterTable(NameIdentifier ident, TableChange... changes) */ @Override public boolean dropTable(NameIdentifier ident) { - NameIdentifier catalogIdent = getCatalogIdentifier(ident); - boolean droppedFromCatalog = - doWithCatalog( - catalogIdent, c -> c.doWithTableOps(t -> t.dropTable(ident)), RuntimeException.class); - - // For unmanaged table, it could happen that the table: - // 1. 
Is not found in the catalog (dropped directly from underlying sources) - // 2. Is found in the catalog but not in the store (not managed by Gravitino) - // 3. Is found in the catalog and the store (managed by Gravitino) - // 4. Neither found in the catalog nor in the store. - // In all situations, we try to delete the schema from the store, but we don't take the - // return value of the store operation into account. We only take the return value of the - // catalog into account. - // - // For managed table, we should take the return value of the store operation into account. - boolean droppedFromStore = false; - try { - droppedFromStore = store.delete(ident, TABLE); - } catch (NoSuchEntityException e) { - LOG.warn("The table to be dropped does not exist in the store: {}", ident, e); - } catch (Exception e) { - throw new RuntimeException(e); - } - - return isManagedEntity(catalogIdent, Capability.Scope.TABLE) - ? droppedFromStore - : droppedFromCatalog; + NameIdentifier schemaIdentifier = NameIdentifierUtil.getSchemaIdentifier(ident); + return TreeLockUtils.doWithTreeLock( + schemaIdentifier, + LockType.WRITE, + () -> { + NameIdentifier catalogIdent = getCatalogIdentifier(ident); + boolean droppedFromCatalog = + doWithCatalog( + catalogIdent, + c -> c.doWithTableOps(t -> t.dropTable(ident)), + RuntimeException.class); + + // For unmanaged table, it could happen that the table: + // 1. Is not found in the catalog (dropped directly from underlying sources) + // 2. Is found in the catalog but not in the store (not managed by Gravitino) + // 3. Is found in the catalog and the store (managed by Gravitino) + // 4. Neither found in the catalog nor in the store. + // In all situations, we try to delete the schema from the store, but we don't take the + // return value of the store operation into account. We only take the return value of the + // catalog into account. + // + // For managed table, we should take the return value of the store operation into account. 
+ boolean droppedFromStore = false; + try { + droppedFromStore = store.delete(ident, TABLE); + } catch (NoSuchEntityException e) { + LOG.warn("The table to be dropped does not exist in the store: {}", ident, e); + } catch (Exception e) { + throw new RuntimeException(e); + } + + return isManagedEntity(catalogIdent, Capability.Scope.TABLE) + ? droppedFromStore + : droppedFromCatalog; + }); } /** @@ -314,37 +323,43 @@ public boolean dropTable(NameIdentifier ident) { */ @Override public boolean purgeTable(NameIdentifier ident) throws UnsupportedOperationException { - NameIdentifier catalogIdent = getCatalogIdentifier(ident); - boolean droppedFromCatalog = - doWithCatalog( - catalogIdent, - c -> c.doWithTableOps(t -> t.purgeTable(ident)), - RuntimeException.class, - UnsupportedOperationException.class); - - // For unmanaged table, it could happen that the table: - // 1. Is not found in the catalog (dropped directly from underlying sources) - // 2. Is found in the catalog but not in the store (not managed by Gravitino) - // 3. Is found in the catalog and the store (managed by Gravitino) - // 4. Neither found in the catalog nor in the store. - // In all situations, we try to delete the schema from the store, but we don't take the - // return value of the store operation into account. We only take the return value of the - // catalog into account. - // - // For managed table, we should take the return value of the store operation into account. - boolean droppedFromStore = false; - try { - droppedFromStore = store.delete(ident, TABLE); - } catch (NoSuchEntityException e) { - LOG.warn("The table to be purged does not exist in the store: {}", ident, e); - return false; - } catch (Exception e) { - throw new RuntimeException(e); - } - - return isManagedEntity(catalogIdent, Capability.Scope.TABLE) - ? 
droppedFromStore - : droppedFromCatalog; + NameIdentifier schemaIdentifier = NameIdentifierUtil.getSchemaIdentifier(ident); + return TreeLockUtils.doWithTreeLock( + schemaIdentifier, + LockType.WRITE, + () -> { + NameIdentifier catalogIdent = getCatalogIdentifier(ident); + boolean droppedFromCatalog = + doWithCatalog( + catalogIdent, + c -> c.doWithTableOps(t -> t.purgeTable(ident)), + RuntimeException.class, + UnsupportedOperationException.class); + + // For unmanaged table, it could happen that the table: + // 1. Is not found in the catalog (dropped directly from underlying sources) + // 2. Is found in the catalog but not in the store (not managed by Gravitino) + // 3. Is found in the catalog and the store (managed by Gravitino) + // 4. Neither found in the catalog nor in the store. + // In all situations, we try to delete the schema from the store, but we don't take the + // return value of the store operation into account. We only take the return value of the + // catalog into account. + // + // For managed table, we should take the return value of the store operation into account. + boolean droppedFromStore = false; + try { + droppedFromStore = store.delete(ident, TABLE); + } catch (NoSuchEntityException e) { + LOG.warn("The table to be purged does not exist in the store: {}", ident, e); + return false; + } catch (Exception e) { + throw new RuntimeException(e); + } + + return isManagedEntity(catalogIdent, Capability.Scope.TABLE) + ? 
droppedFromStore + : droppedFromCatalog; + }); } private EntityCombinedTable importTable(NameIdentifier identifier) { diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/GroupMetaBaseSQLProvider.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/GroupMetaBaseSQLProvider.java index a52e1b86144..1f28b771c2d 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/GroupMetaBaseSQLProvider.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/GroupMetaBaseSQLProvider.java @@ -57,16 +57,19 @@ public String listExtendedGroupPOsByMetalakeId(Long metalakeId) { + " JSON_ARRAYAGG(rot.role_id) as roleIds" + " FROM " + GROUP_TABLE_NAME - + " gt LEFT OUTER JOIN " + + " gt LEFT OUTER JOIN (" + + " SELECT * FROM " + GROUP_ROLE_RELATION_TABLE_NAME - + " rt ON rt.group_id = gt.group_id" - + " LEFT OUTER JOIN " + + " WHERE deleted_at = 0)" + + " AS rt ON rt.group_id = gt.group_id" + + " LEFT OUTER JOIN (" + + " SELECT * FROM " + ROLE_TABLE_NAME - + " rot ON rot.role_id = rt.role_id" + + " WHERE deleted_at = 0)" + + " AS rot ON rot.role_id = rt.role_id" + " WHERE " + " gt.deleted_at = 0 AND" - + " (rot.deleted_at = 0 OR rot.deleted_at is NULL) AND" - + " (rt.deleted_at = 0 OR rt.deleted_at is NULL) AND gt.metalake_id = #{metalakeId}" + + " gt.metalake_id = #{metalakeId}" + " GROUP BY gt.group_id"; } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/UserMetaBaseSQLProvider.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/UserMetaBaseSQLProvider.java index 2a211c24f5e..4e81ae35df9 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/UserMetaBaseSQLProvider.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/base/UserMetaBaseSQLProvider.java @@ -165,16 +165,19 @@ public String 
listExtendedUserPOsByMetalakeId(@Param("metalakeId") Long metalake + " JSON_ARRAYAGG(rot.role_id) as roleIds" + " FROM " + USER_TABLE_NAME - + " ut LEFT OUTER JOIN " + + " ut LEFT OUTER JOIN (" + + " SELECT * FROM " + USER_ROLE_RELATION_TABLE_NAME - + " rt ON rt.user_id = ut.user_id" - + " LEFT OUTER JOIN " + + " WHERE deleted_at = 0)" + + " AS rt ON rt.user_id = ut.user_id" + + " LEFT OUTER JOIN (" + + " SELECT * FROM " + ROLE_TABLE_NAME - + " rot ON rot.role_id = rt.role_id" + + " WHERE deleted_at = 0)" + + " AS rot ON rot.role_id = rt.role_id" + " WHERE " + " ut.deleted_at = 0 AND" - + " (rot.deleted_at = 0 OR rot.deleted_at is NULL) AND" - + " (rt.deleted_at = 0 OR rt.deleted_at is NULL) AND ut.metalake_id = #{metalakeId}" + + " ut.metalake_id = #{metalakeId}" + " GROUP BY ut.user_id"; } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/h2/GroupMetaH2Provider.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/h2/GroupMetaH2Provider.java index 175d9d8ae9a..e975131e090 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/h2/GroupMetaH2Provider.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/h2/GroupMetaH2Provider.java @@ -37,16 +37,19 @@ public String listExtendedGroupPOsByMetalakeId(@Param("metalakeId") Long metalak + " '[' || GROUP_CONCAT('\"' || rot.role_id || '\"') || ']' as roleIds" + " FROM " + GROUP_TABLE_NAME - + " gt LEFT OUTER JOIN " + + " gt LEFT OUTER JOIN (" + + " SELECT * FROM " + GROUP_ROLE_RELATION_TABLE_NAME - + " rt ON rt.group_id = gt.group_id" - + " LEFT OUTER JOIN " + + " WHERE deleted_at = 0)" + + " AS rt ON rt.group_id = gt.group_id" + + " LEFT OUTER JOIN (" + + " SELECT * FROM " + ROLE_TABLE_NAME - + " rot ON rot.role_id = rt.role_id" + + " WHERE deleted_at = 0)" + + " AS rot ON rot.role_id = rt.role_id" + " WHERE " + " gt.deleted_at = 0 AND" - + " (rot.deleted_at = 0 OR rot.deleted_at is NULL) AND" 
- + " (rt.deleted_at = 0 OR rt.deleted_at is NULL) AND gt.metalake_id = #{metalakeId}" + + " gt.metalake_id = #{metalakeId}" + " GROUP BY gt.group_id"; } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/h2/UserMetaH2Provider.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/h2/UserMetaH2Provider.java index be17138ce49..b4fb1614904 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/h2/UserMetaH2Provider.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/h2/UserMetaH2Provider.java @@ -37,16 +37,19 @@ public String listExtendedUserPOsByMetalakeId(@Param("metalakeId") Long metalake + " '[' || GROUP_CONCAT('\"' || rot.role_id || '\"') || ']' as roleIds" + " FROM " + USER_TABLE_NAME - + " ut LEFT OUTER JOIN " + + " ut LEFT OUTER JOIN (" + + " SELECT * FROM " + USER_ROLE_RELATION_TABLE_NAME - + " rt ON rt.user_id = ut.user_id" - + " LEFT OUTER JOIN " + + " WHERE deleted_at = 0)" + + " AS rt ON rt.user_id = ut.user_id" + + " LEFT OUTER JOIN (" + + " SELECT * FROM " + ROLE_TABLE_NAME - + " rot ON rot.role_id = rt.role_id" + + " WHERE deleted_at = 0)" + + " AS rot ON rot.role_id = rt.role_id" + " WHERE " + " ut.deleted_at = 0 AND " - + "(rot.deleted_at = 0 OR rot.deleted_at is NULL) AND " - + "(rt.deleted_at = 0 OR rt.deleted_at is NULL) AND ut.metalake_id = #{metalakeId}" + + " ut.metalake_id = #{metalakeId}" + " GROUP BY ut.user_id"; } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/GroupMetaPostgreSQLProvider.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/GroupMetaPostgreSQLProvider.java index 51cf47bf7d7..3ace33f6f84 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/GroupMetaPostgreSQLProvider.java +++ 
b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/GroupMetaPostgreSQLProvider.java @@ -80,16 +80,19 @@ public String listExtendedGroupPOsByMetalakeId(Long metalakeId) { + " JSON_AGG(rot.role_id) as roleIds" + " FROM " + GROUP_TABLE_NAME - + " gt LEFT OUTER JOIN " + + " gt LEFT OUTER JOIN (" + + " SELECT * FROM " + GROUP_ROLE_RELATION_TABLE_NAME - + " rt ON rt.group_id = gt.group_id" - + " LEFT OUTER JOIN " + + " WHERE deleted_at = 0)" + + " AS rt ON rt.group_id = gt.group_id" + + " LEFT OUTER JOIN (" + + " SELECT * FROM " + ROLE_TABLE_NAME - + " rot ON rot.role_id = rt.role_id" + + " WHERE deleted_at = 0)" + + " AS rot ON rot.role_id = rt.role_id" + " WHERE " + " gt.deleted_at = 0 AND" - + " (rot.deleted_at = 0 OR rot.deleted_at is NULL) AND" - + " (rt.deleted_at = 0 OR rt.deleted_at is NULL) AND gt.metalake_id = #{metalakeId}" + + " gt.metalake_id = #{metalakeId}" + " GROUP BY gt.group_id"; } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/UserMetaPostgreSQLProvider.java b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/UserMetaPostgreSQLProvider.java index b6ac62b2b87..84ab965582c 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/UserMetaPostgreSQLProvider.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/mapper/provider/postgresql/UserMetaPostgreSQLProvider.java @@ -80,16 +80,19 @@ public String listExtendedUserPOsByMetalakeId(Long metalakeId) { + " JSON_AGG(rot.role_id) as roleIds" + " FROM " + USER_TABLE_NAME - + " ut LEFT OUTER JOIN " + + " ut LEFT OUTER JOIN (" + + " SELECT * FROM " + USER_ROLE_RELATION_TABLE_NAME - + " rt ON rt.user_id = ut.user_id" - + " LEFT OUTER JOIN " + + " WHERE deleted_at = 0)" + + " AS rt ON rt.user_id = ut.user_id" + + " LEFT OUTER JOIN (" + + " SELECT * FROM " + ROLE_TABLE_NAME - + " rot ON rot.role_id = rt.role_id" + + " WHERE 
deleted_at = 0)" + + " AS rot ON rot.role_id = rt.role_id" + " WHERE " + " ut.deleted_at = 0 AND" - + " (rot.deleted_at = 0 OR rot.deleted_at is NULL) AND" - + " (rt.deleted_at = 0 OR rt.deleted_at is NULL) AND ut.metalake_id = #{metalakeId}" + + " ut.metalake_id = #{metalakeId}" + " GROUP BY ut.user_id"; } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/CommonMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/CommonMetaService.java index f990e94fdcc..bdab2ad9fe5 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/CommonMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/CommonMetaService.java @@ -57,4 +57,29 @@ public Long getParentEntityIdByNamespace(Namespace namespace) { "Parent entity id should not be null and should be greater than 0."); return parentEntityId; } + + public Long[] getParentEntityIdsByNamespace(Namespace namespace) { + Preconditions.checkArgument( + !namespace.isEmpty() && namespace.levels().length <= 3, + "Namespace should not be empty and length should be less than or equal to 3."); + Long[] parentEntityIds = new Long[namespace.levels().length]; + if (namespace.levels().length >= 1) { + parentEntityIds[0] = + MetalakeMetaService.getInstance().getMetalakeIdByName(namespace.level(0)); + } + + if (namespace.levels().length >= 2) { + parentEntityIds[1] = + CatalogMetaService.getInstance() + .getCatalogIdByMetalakeIdAndName(parentEntityIds[0], namespace.level(1)); + } + + if (namespace.levels().length >= 3) { + parentEntityIds[2] = + SchemaMetaService.getInstance() + .getSchemaIdByCatalogIdAndName(parentEntityIds[1], namespace.level(2)); + } + + return parentEntityIds; + } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/FilesetMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/FilesetMetaService.java index e049f436406..9233005c34a 100644 --- 
a/core/src/main/java/org/apache/gravitino/storage/relational/service/FilesetMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/FilesetMetaService.java @@ -314,26 +314,10 @@ public int deleteFilesetVersionsByRetentionCount(Long versionRetentionCount, int private void fillFilesetPOBuilderParentEntityId(FilesetPO.Builder builder, Namespace namespace) { NamespaceUtil.checkFileset(namespace); - Long parentEntityId = null; - for (int level = 0; level < namespace.levels().length; level++) { - String name = namespace.level(level); - switch (level) { - case 0: - parentEntityId = MetalakeMetaService.getInstance().getMetalakeIdByName(name); - builder.withMetalakeId(parentEntityId); - continue; - case 1: - parentEntityId = - CatalogMetaService.getInstance() - .getCatalogIdByMetalakeIdAndName(parentEntityId, name); - builder.withCatalogId(parentEntityId); - continue; - case 2: - parentEntityId = - SchemaMetaService.getInstance().getSchemaIdByCatalogIdAndName(parentEntityId, name); - builder.withSchemaId(parentEntityId); - break; - } - } + Long[] parentEntityIds = + CommonMetaService.getInstance().getParentEntityIdsByNamespace(namespace); + builder.withMetalakeId(parentEntityIds[0]); + builder.withCatalogId(parentEntityIds[1]); + builder.withSchemaId(parentEntityIds[2]); } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/MetadataObjectService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/MetadataObjectService.java index 9834bafa0e0..e6790a602c1 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/MetadataObjectService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/MetadataObjectService.java @@ -50,10 +50,10 @@ public static long getMetadataObjectId( return MetalakeMetaService.getInstance().getMetalakeIdByName(fullName); } - List names = DOT_SPLITTER.splitToList(fullName); if (type == MetadataObject.Type.ROLE) { - return 
RoleMetaService.getInstance().getRoleIdByMetalakeIdAndName(metalakeId, names.get(0)); + return RoleMetaService.getInstance().getRoleIdByMetalakeIdAndName(metalakeId, fullName); } + List names = DOT_SPLITTER.splitToList(fullName); long catalogId = CatalogMetaService.getInstance().getCatalogIdByMetalakeIdAndName(metalakeId, names.get(0)); diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/ModelMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/ModelMetaService.java index 2da43755c51..0197dfdd2dd 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/ModelMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/ModelMetaService.java @@ -172,20 +172,10 @@ ModelPO getModelPOById(Long modelId) { private void fillModelPOBuilderParentEntityId(ModelPO.Builder builder, Namespace ns) { NamespaceUtil.checkModel(ns); - String metalake = ns.level(0); - String catalog = ns.level(1); - String schema = ns.level(2); - - Long metalakeId = MetalakeMetaService.getInstance().getMetalakeIdByName(metalake); - builder.withMetalakeId(metalakeId); - - Long catalogId = - CatalogMetaService.getInstance().getCatalogIdByMetalakeIdAndName(metalakeId, catalog); - builder.withCatalogId(catalogId); - - Long schemaId = - SchemaMetaService.getInstance().getSchemaIdByCatalogIdAndName(catalogId, schema); - builder.withSchemaId(schemaId); + Long[] parentEntityIds = CommonMetaService.getInstance().getParentEntityIdsByNamespace(ns); + builder.withMetalakeId(parentEntityIds[0]); + builder.withCatalogId(parentEntityIds[1]); + builder.withSchemaId(parentEntityIds[2]); } ModelPO getModelPOByIdentifier(NameIdentifier ident) { diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/SchemaMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/SchemaMetaService.java index 4c9c828cb9c..f300e70cae3 100644 --- 
a/core/src/main/java/org/apache/gravitino/storage/relational/service/SchemaMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/SchemaMetaService.java @@ -316,21 +316,9 @@ public int deleteSchemaMetasByLegacyTimeline(Long legacyTimeline, int limit) { private void fillSchemaPOBuilderParentEntityId(SchemaPO.Builder builder, Namespace namespace) { NamespaceUtil.checkSchema(namespace); - Long parentEntityId = null; - for (int level = 0; level < namespace.levels().length; level++) { - String name = namespace.level(level); - switch (level) { - case 0: - parentEntityId = MetalakeMetaService.getInstance().getMetalakeIdByName(name); - builder.withMetalakeId(parentEntityId); - continue; - case 1: - parentEntityId = - CatalogMetaService.getInstance() - .getCatalogIdByMetalakeIdAndName(parentEntityId, name); - builder.withCatalogId(parentEntityId); - break; - } - } + Long[] parentEntityIds = + CommonMetaService.getInstance().getParentEntityIdsByNamespace(namespace); + builder.withMetalakeId(parentEntityIds[0]); + builder.withCatalogId(parentEntityIds[1]); } } diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/TableMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/TableMetaService.java index 248dedd8a73..bc44ac43a92 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/TableMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/TableMetaService.java @@ -253,27 +253,11 @@ public int deleteTableMetasByLegacyTimeline(Long legacyTimeline, int limit) { private void fillTablePOBuilderParentEntityId(TablePO.Builder builder, Namespace namespace) { NamespaceUtil.checkTable(namespace); - Long parentEntityId = null; - for (int level = 0; level < namespace.levels().length; level++) { - String name = namespace.level(level); - switch (level) { - case 0: - parentEntityId = MetalakeMetaService.getInstance().getMetalakeIdByName(name); 
- builder.withMetalakeId(parentEntityId); - continue; - case 1: - parentEntityId = - CatalogMetaService.getInstance() - .getCatalogIdByMetalakeIdAndName(parentEntityId, name); - builder.withCatalogId(parentEntityId); - continue; - case 2: - parentEntityId = - SchemaMetaService.getInstance().getSchemaIdByCatalogIdAndName(parentEntityId, name); - builder.withSchemaId(parentEntityId); - break; - } - } + Long[] parentEntityIds = + CommonMetaService.getInstance().getParentEntityIdsByNamespace(namespace); + builder.withMetalakeId(parentEntityIds[0]); + builder.withCatalogId(parentEntityIds[1]); + builder.withSchemaId(parentEntityIds[2]); } private TablePO getTablePOBySchemaIdAndName(Long schemaId, String tableName) { diff --git a/core/src/main/java/org/apache/gravitino/storage/relational/service/TopicMetaService.java b/core/src/main/java/org/apache/gravitino/storage/relational/service/TopicMetaService.java index 7bc933824aa..66a12aa9de1 100644 --- a/core/src/main/java/org/apache/gravitino/storage/relational/service/TopicMetaService.java +++ b/core/src/main/java/org/apache/gravitino/storage/relational/service/TopicMetaService.java @@ -154,27 +154,11 @@ public TopicPO getTopicPOById(Long topicId) { private void fillTopicPOBuilderParentEntityId(TopicPO.Builder builder, Namespace namespace) { NamespaceUtil.checkTopic(namespace); - Long parentEntityId = null; - for (int level = 0; level < namespace.levels().length; level++) { - String name = namespace.level(level); - switch (level) { - case 0: - parentEntityId = MetalakeMetaService.getInstance().getMetalakeIdByName(name); - builder.withMetalakeId(parentEntityId); - continue; - case 1: - parentEntityId = - CatalogMetaService.getInstance() - .getCatalogIdByMetalakeIdAndName(parentEntityId, name); - builder.withCatalogId(parentEntityId); - continue; - case 2: - parentEntityId = - SchemaMetaService.getInstance().getSchemaIdByCatalogIdAndName(parentEntityId, name); - builder.withSchemaId(parentEntityId); - break; - } - } + Long[] 
parentEntityIds = + CommonMetaService.getInstance().getParentEntityIdsByNamespace(namespace); + builder.withMetalakeId(parentEntityIds[0]); + builder.withCatalogId(parentEntityIds[1]); + builder.withSchemaId(parentEntityIds[2]); } public TopicEntity getTopicByIdentifier(NameIdentifier identifier) { diff --git a/core/src/main/java/org/apache/gravitino/utils/MetadataObjectUtil.java b/core/src/main/java/org/apache/gravitino/utils/MetadataObjectUtil.java index 44b9d30a0a7..eb963182bf3 100644 --- a/core/src/main/java/org/apache/gravitino/utils/MetadataObjectUtil.java +++ b/core/src/main/java/org/apache/gravitino/utils/MetadataObjectUtil.java @@ -32,6 +32,7 @@ import org.apache.gravitino.MetadataObject; import org.apache.gravitino.NameIdentifier; import org.apache.gravitino.authorization.AuthorizationUtils; +import org.apache.gravitino.exceptions.IllegalMetadataObjectException; import org.apache.gravitino.exceptions.NoSuchMetadataObjectException; import org.apache.gravitino.exceptions.NoSuchRoleException; @@ -125,6 +126,9 @@ public static void checkMetadataObject(String metalake, MetadataObject object) { switch (object.type()) { case METALAKE: + if (!metalake.equals(object.name())) { + throw new IllegalMetadataObjectException("The metalake object name must be %s", metalake); + } NameIdentifierUtil.checkMetalake(identifier); check(env.metalakeDispatcher().metalakeExists(identifier), exceptionToThrowSupplier); break; diff --git a/core/src/main/java/org/apache/gravitino/utils/NameIdentifierUtil.java b/core/src/main/java/org/apache/gravitino/utils/NameIdentifierUtil.java index b656bfa95da..2b7e69ebee0 100644 --- a/core/src/main/java/org/apache/gravitino/utils/NameIdentifierUtil.java +++ b/core/src/main/java/org/apache/gravitino/utils/NameIdentifierUtil.java @@ -249,7 +249,8 @@ public static NameIdentifier toModelVersionIdentifier(NameIdentifier modelIdent, public static NameIdentifier getCatalogIdentifier(NameIdentifier ident) throws IllegalNameIdentifierException { 
NameIdentifier.check( - ident.name() != null, "The name variable in the NameIdentifier must have value."); + ident.name() != null && !ident.name().isEmpty(), + "The name variable in the NameIdentifier must have value."); Namespace.check( ident.namespace() != null && !ident.namespace().isEmpty(), "Catalog namespace must be non-null and have 1 level, the input namespace is %s", @@ -265,6 +266,34 @@ public static NameIdentifier getCatalogIdentifier(NameIdentifier ident) return NameIdentifier.of(allElems.get(0), allElems.get(1)); } + /** + * Try to get the schema {@link NameIdentifier} from the given {@link NameIdentifier}. + * + * @param ident The {@link NameIdentifier} to check. + * @return The schema {@link NameIdentifier} + * @throws IllegalNameIdentifierException If the given {@link NameIdentifier} does not include + * schema name + */ + public static NameIdentifier getSchemaIdentifier(NameIdentifier ident) + throws IllegalNameIdentifierException { + NameIdentifier.check( + ident.name() != null && !ident.name().isEmpty(), + "The name variable in the NameIdentifier must have value."); + Namespace.check( + ident.namespace() != null && !ident.namespace().isEmpty() && ident.namespace().length() > 1, + "Schema namespace must be non-null and at least 1 level, the input namespace is %s", + ident.namespace()); + + List allElems = + Stream.concat(Arrays.stream(ident.namespace().levels()), Stream.of(ident.name())) + .collect(Collectors.toList()); + if (allElems.size() < 3) { + throw new IllegalNameIdentifierException( + "Cannot create a schema NameIdentifier less than three elements."); + } + return NameIdentifier.of(allElems.get(0), allElems.get(1), allElems.get(2)); + } + /** * Check the given {@link NameIdentifier} is a metalake identifier. Throw an {@link * IllegalNameIdentifierException} if it's not. 
diff --git a/core/src/test/java/org/apache/gravitino/storage/relational/TestJDBCBackend.java b/core/src/test/java/org/apache/gravitino/storage/relational/TestJDBCBackend.java index 3c9339ff62f..8cd2c802e86 100644 --- a/core/src/test/java/org/apache/gravitino/storage/relational/TestJDBCBackend.java +++ b/core/src/test/java/org/apache/gravitino/storage/relational/TestJDBCBackend.java @@ -81,6 +81,7 @@ import org.apache.gravitino.storage.RandomIdGenerator; import org.apache.gravitino.storage.relational.mapper.GroupMetaMapper; import org.apache.gravitino.storage.relational.mapper.UserMetaMapper; +import org.apache.gravitino.storage.relational.service.MetalakeMetaService; import org.apache.gravitino.storage.relational.service.RoleMetaService; import org.apache.gravitino.storage.relational.session.SqlSessionFactoryHelper; import org.apache.gravitino.storage.relational.utils.SessionUtils; @@ -952,6 +953,98 @@ public void testMetaLifeCycleFromCreationToDeletion() throws IOException { assertEquals(1, listFilesetVersions(anotherFileset.id()).size()); } + @Test + public void testGetRoleIdByMetalakeIdAndName() throws IOException { + AuditInfo auditInfo = + AuditInfo.builder().withCreator("creator").withCreateTime(Instant.now()).build(); + String metalakeName = "testMetalake"; + String catalogName = "catalog"; + String roleNameWithDot = "role.with.dot"; + String roleNameWithoutDot = "roleWithoutDot"; + + BaseMetalake metalake = + createBaseMakeLake(RandomIdGenerator.INSTANCE.nextId(), metalakeName, auditInfo); + backend.insert(metalake, false); + + CatalogEntity catalog = + createCatalog( + RandomIdGenerator.INSTANCE.nextId(), + NamespaceUtil.ofCatalog(metalakeName), + catalogName, + auditInfo); + backend.insert(catalog, false); + + RoleEntity roleWithDot = + createRoleEntity( + RandomIdGenerator.INSTANCE.nextId(), + AuthorizationUtils.ofRoleNamespace(metalakeName), + roleNameWithDot, + auditInfo, + catalogName); + backend.insert(roleWithDot, false); + + RoleEntity 
roleWithoutDot = + createRoleEntity( + RandomIdGenerator.INSTANCE.nextId(), + AuthorizationUtils.ofRoleNamespace(metalakeName), + roleNameWithoutDot, + auditInfo, + catalogName); + backend.insert(roleWithoutDot, false); + + Long metalakeId = MetalakeMetaService.getInstance().getMetalakeIdByName(metalakeName); + + Long roleIdWithDot = + RoleMetaService.getInstance().getRoleIdByMetalakeIdAndName(metalakeId, roleNameWithDot); + assertEquals(roleWithDot.id(), roleIdWithDot); + + Long roleIdWithoutDot = + RoleMetaService.getInstance().getRoleIdByMetalakeIdAndName(metalakeId, roleNameWithoutDot); + assertEquals(roleWithoutDot.id(), roleIdWithoutDot); + } + + @Test + public void testInsertRelationWithDotInRoleName() throws IOException { + AuditInfo auditInfo = + AuditInfo.builder().withCreator("creator").withCreateTime(Instant.now()).build(); + String metalakeName = "testMetalake"; + String catalogName = "catalog"; + String roleNameWithDot = "role.with.dot"; + + BaseMetalake metalake = + createBaseMakeLake(RandomIdGenerator.INSTANCE.nextId(), metalakeName, auditInfo); + backend.insert(metalake, false); + + CatalogEntity catalog = + createCatalog( + RandomIdGenerator.INSTANCE.nextId(), + NamespaceUtil.ofCatalog(metalakeName), + catalogName, + auditInfo); + backend.insert(catalog, false); + + RoleEntity role = + createRoleEntity( + RandomIdGenerator.INSTANCE.nextId(), + AuthorizationUtils.ofRoleNamespace(metalakeName), + roleNameWithDot, + auditInfo, + catalogName); + backend.insert(role, false); + + UserEntity user = + createUserEntity( + RandomIdGenerator.INSTANCE.nextId(), + AuthorizationUtils.ofUserNamespace(metalakeName), + "user", + auditInfo); + backend.insert(user, false); + + backend.insertRelation( + OWNER_REL, role.nameIdentifier(), role.type(), user.nameIdentifier(), user.type(), true); + assertEquals(1, countActiveOwnerRel(user.id())); + } + private boolean legacyRecordExistsInDB(Long id, Entity.EntityType entityType) { String tableName; String idColumnName; 
diff --git a/core/src/test/java/org/apache/gravitino/storage/relational/service/TestGroupMetaService.java b/core/src/test/java/org/apache/gravitino/storage/relational/service/TestGroupMetaService.java index 77cd9d110bc..5e90f0eb89f 100644 --- a/core/src/test/java/org/apache/gravitino/storage/relational/service/TestGroupMetaService.java +++ b/core/src/test/java/org/apache/gravitino/storage/relational/service/TestGroupMetaService.java @@ -27,6 +27,7 @@ import java.sql.SQLException; import java.sql.Statement; import java.time.Instant; +import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Optional; @@ -189,6 +190,33 @@ void testListGroups() throws IOException { } } } + + // ISSUE-6061: Test listGroupsByNamespace with revoked users + Function revokeUpdater = + group -> { + AuditInfo updateAuditInfo = + AuditInfo.builder() + .withCreator(group.auditInfo().creator()) + .withCreateTime(group.auditInfo().createTime()) + .withLastModifier("revokeGroup") + .withLastModifiedTime(Instant.now()) + .build(); + + return GroupEntity.builder() + .withNamespace(group.namespace()) + .withId(group.id()) + .withName(group.name()) + .withRoleNames(Collections.emptyList()) + .withRoleIds(Collections.emptyList()) + .withAuditInfo(updateAuditInfo) + .build(); + }; + + Assertions.assertNotNull(groupMetaService.updateGroup(group2.nameIdentifier(), revokeUpdater)); + actualGroups = + groupMetaService.listGroupsByNamespace( + AuthorizationUtils.ofGroupNamespace(metalakeName), true); + Assertions.assertEquals(expectGroups.size(), actualGroups.size()); } @Test diff --git a/core/src/test/java/org/apache/gravitino/storage/relational/service/TestUserMetaService.java b/core/src/test/java/org/apache/gravitino/storage/relational/service/TestUserMetaService.java index 0efd886ee4d..e93a83bafd6 100644 --- a/core/src/test/java/org/apache/gravitino/storage/relational/service/TestUserMetaService.java +++ 
b/core/src/test/java/org/apache/gravitino/storage/relational/service/TestUserMetaService.java @@ -27,6 +27,7 @@ import java.sql.SQLException; import java.sql.Statement; import java.time.Instant; +import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Optional; @@ -188,6 +189,33 @@ void testListUsers() throws IOException { } } } + + // ISSUE-6061: Test listUsersByNamespace with revoked users + Function revokeUpdater = + user -> { + AuditInfo updateAuditInfo = + AuditInfo.builder() + .withCreator(user.auditInfo().creator()) + .withCreateTime(user.auditInfo().createTime()) + .withLastModifier("revokeUser") + .withLastModifiedTime(Instant.now()) + .build(); + + return UserEntity.builder() + .withNamespace(user.namespace()) + .withId(user.id()) + .withName(user.name()) + .withRoleNames(Collections.emptyList()) + .withRoleIds(Collections.emptyList()) + .withAuditInfo(updateAuditInfo) + .build(); + }; + + Assertions.assertNotNull(userMetaService.updateUser(user1.nameIdentifier(), revokeUpdater)); + actualUsers = + userMetaService.listUsersByNamespace( + AuthorizationUtils.ofUserNamespace(metalakeName), true); + Assertions.assertEquals(expectUsers.size(), actualUsers.size()); } @Test diff --git a/dev/docker/iceberg-rest-server/iceberg-rest-server-dependency.sh b/dev/docker/iceberg-rest-server/iceberg-rest-server-dependency.sh index 2235313dc09..852b55b0206 100755 --- a/dev/docker/iceberg-rest-server/iceberg-rest-server-dependency.sh +++ b/dev/docker/iceberg-rest-server/iceberg-rest-server-dependency.sh @@ -38,6 +38,7 @@ cd ${gravitino_home} ./gradlew :bundles:gcp-bundle:jar ./gradlew :bundles:aws-bundle:jar ./gradlew :bundles:azure-bundle:jar +./gradlew :bundles:aliyun-bundle:jar # prepare bundle jar cd ${iceberg_rest_server_dir} @@ -45,6 +46,7 @@ mkdir -p bundles cp ${gravitino_home}/bundles/gcp-bundle/build/libs/gravitino-gcp-bundle-*.jar bundles/ cp ${gravitino_home}/bundles/aws-bundle/build/libs/gravitino-aws-bundle-*.jar 
bundles/ cp ${gravitino_home}/bundles/azure-bundle/build/libs/gravitino-azure-bundle-*.jar bundles/ +cp ${gravitino_home}/bundles/aliyun-bundle/build/libs/gravitino-aliyun-bundle-*.jar bundles/ iceberg_gcp_bundle="iceberg-gcp-bundle-1.5.2.jar" if [ ! -f "bundles/${iceberg_gcp_bundle}" ]; then @@ -61,6 +63,11 @@ if [ ! -f "bundles/${iceberg_azure_bundle}" ]; then curl -L -s -o bundles/${iceberg_azure_bundle} https://repo1.maven.org/maven2/org/apache/iceberg/iceberg-azure-bundle/1.5.2/${iceberg_azure_bundle} fi +iceberg_aliyun_bundle="iceberg-aliyun-bundle-1.5.2.jar" +if [ ! -f "bundles/${iceberg_aliyun_bundle}" ]; then + curl -L -s -o bundles/${iceberg_aliyun_bundle} https://repo1.maven.org/maven2/org/apache/iceberg/iceberg-aliyun-bundle/1.5.2/${iceberg_aliyun_bundle} +fi + # download jdbc driver curl -L -s -o bundles/sqlite-jdbc-3.42.0.0.jar https://repo1.maven.org/maven2/org/xerial/sqlite-jdbc/3.42.0.0/sqlite-jdbc-3.42.0.0.jar diff --git a/dev/docker/iceberg-rest-server/rewrite_config.py b/dev/docker/iceberg-rest-server/rewrite_config.py index b10cdb4bfb7..8b9b42a531c 100755 --- a/dev/docker/iceberg-rest-server/rewrite_config.py +++ b/dev/docker/iceberg-rest-server/rewrite_config.py @@ -36,6 +36,13 @@ "GRAVITINO_AZURE_TENANT_ID" : "azure-tenant-id", "GRAVITINO_AZURE_CLIENT_ID" : "azure-client-id", "GRAVITINO_AZURE_CLIENT_SECRET" : "azure-client-secret", + "GRAVITINO_OSS_ACCESS_KEY": "oss-access-key-id", + "GRAVITINO_OSS_SECRET_KEY": "oss-secret-access-key", + "GRAVITINO_OSS_ENDPOINT": "oss-endpoint", + "GRAVITINO_OSS_REGION": "oss-region", + "GRAVITINO_OSS_ROLE_ARN": "oss-role-arn", + "GRAVITINO_OSS_EXTERNAL_ID": "oss-external-id", + } init_config = { diff --git a/docs/assets/gravitino-model-arch.png b/docs/assets/gravitino-model-arch.png index de10689c076..5f43f1c29af 100644 Binary files a/docs/assets/gravitino-model-arch.png and b/docs/assets/gravitino-model-arch.png differ diff --git a/docs/assets/metadata-model.png b/docs/assets/metadata-model.png deleted file 
mode 100644 index 143cb292bba..00000000000 Binary files a/docs/assets/metadata-model.png and /dev/null differ diff --git a/docs/docker-image-details.md b/docs/docker-image-details.md index c723c009d93..48b3bd191a1 100644 --- a/docs/docker-image-details.md +++ b/docs/docker-image-details.md @@ -59,6 +59,8 @@ docker run --rm -d -p 9001:9001 apache/gravitino-iceberg-rest:0.7.0-incubating ``` Changelog +- apache/gravitino-iceberg-rest:0.8.0-incubating + - Supports OSS and ADLS storage. - apache/gravitino-iceberg-rest:0.7.0-incubating - Using JDBC catalog backend. diff --git a/docs/hadoop-catalog.md b/docs/hadoop-catalog.md index 57da399b12c..d5427844ad8 100644 --- a/docs/hadoop-catalog.md +++ b/docs/hadoop-catalog.md @@ -23,9 +23,12 @@ Hadoop 3. If there's any compatibility issue, please create an [issue](https://g Besides the [common catalog properties](./gravitino-server-config.md#apache-gravitino-catalog-properties-configuration), the Hadoop catalog has the following properties: -| Property Name | Description | Default Value | Required | Since Version | -|---------------|-------------------------------------------------|---------------|----------|---------------| -| `location` | The storage location managed by Hadoop catalog. | (none) | No | 0.5.0 | +| Property Name | Description | Default Value | Required | Since Version | +|------------------------|----------------------------------------------------|---------------|----------|------------------| +| `location` | The storage location managed by Hadoop catalog. | (none) | No | 0.5.0 | +| `credential-providers` | The credential provider types, separated by comma. | (none) | No | 0.8.0-incubating | + +Please refer to [Credential vending](./security/credential-vending.md) for more details about credential vending. Apart from the above properties, to access fileset like HDFS, S3, GCS, OSS or custom fileset, you need to configure the following extra properties. 
@@ -50,6 +53,10 @@ Apart from the above properties, to access fileset like HDFS, S3, GCS, OSS or cu | `s3-access-key-id` | The access key of the AWS S3. | (none) | Yes if it's a S3 fileset. | 0.7.0-incubating | | `s3-secret-access-key` | The secret key of the AWS S3. | (none) | Yes if it's a S3 fileset. | 0.7.0-incubating | +Please refer to [S3 credentials](./security/credential-vending.md#s3-credentials) for credential related configurations. + +At the same time, you need to place the corresponding bundle jar [`gravitino-aws-bundle-${version}.jar`](https://repo1.maven.org/maven2/org/apache/gravitino/gravitino-aws-bundle/) in the directory `${GRAVITINO_HOME}/catalogs/hadoop/libs`. + #### GCS fileset | Configuration item | Description | Default value | Required | Since version | @@ -58,6 +65,10 @@ Apart from the above properties, to access fileset like HDFS, S3, GCS, OSS or cu | `default-filesystem-provider` | The name default filesystem providers of this Hadoop catalog if users do not specify the scheme in the URI. Default value is `builtin-local`, for GCS, if we set this value, we can omit the prefix 'gs://' in the location. | `builtin-local` | No | 0.7.0-incubating | | `gcs-service-account-file` | The path of GCS service account JSON file. | (none) | Yes if it's a GCS fileset. | 0.7.0-incubating | +Please refer to [GCS credentials](./security/credential-vending.md#gcs-credentials) for credential related configurations. + +In the meantime, you need to place the corresponding bundle jar [`gravitino-gcp-bundle-${version}.jar`](https://repo1.maven.org/maven2/org/apache/gravitino/gravitino-gcp-bundle/) in the directory `${GRAVITINO_HOME}/catalogs/hadoop/libs`. + #### OSS fileset | Configuration item | Description | Default value | Required | Since version | @@ -68,6 +79,10 @@ Apart from the above properties, to access fileset like HDFS, S3, GCS, OSS or cu | `oss-access-key-id` | The access key of the Aliyun OSS. | (none) | Yes if it's a OSS fileset. 
| 0.7.0-incubating | | `oss-secret-access-key` | The secret key of the Aliyun OSS. | (none) | Yes if it's a OSS fileset. | 0.7.0-incubating | +Please refer to [OSS credentials](./security/credential-vending.md#oss-credentials) for credential related configurations. + +In the meantime, you need to place the corresponding bundle jar [`gravitino-aliyun-bundle-${version}.jar`](https://repo1.maven.org/maven2/org/apache/gravitino/gravitino-aliyun-bundle/) in the directory `${GRAVITINO_HOME}/catalogs/hadoop/libs`. + #### Azure Blob Storage fileset | Configuration item | Description | Default value | Required | Since version | @@ -77,6 +92,9 @@ Apart from the above properties, to access fileset like HDFS, S3, GCS, OSS or cu | `azure-storage-account-name ` | The account name of Azure Blob Storage. | (none) | Yes if it's a Azure Blob Storage fileset. | 0.8.0-incubating | | `azure-storage-account-key` | The account key of Azure Blob Storage. | (none) | Yes if it's a Azure Blob Storage fileset. | 0.8.0-incubating | +Please refer to [ADLS credentials](./security/credential-vending.md#adls-credentials) for credential related configurations. + +Similar to the above, you need to place the corresponding bundle jar [`gravitino-azure-bundle-${version}.jar`](https://repo1.maven.org/maven2/org/apache/gravitino/gravitino-azure-bundle/) in the directory `${GRAVITINO_HOME}/catalogs/hadoop/libs`. :::note - Gravitino contains builtin file system providers for local file system(`builtin-local`) and HDFS(`builtin-hdfs`), that is to say if `filesystem-providers` is not set, Gravitino will still support local file system and HDFS. Apart from that, you can set the `filesystem-providers` to support other file systems like S3, GCS, OSS or custom file system. @@ -138,7 +156,8 @@ The Hadoop catalog supports creating, updating, deleting, and listing schema. | `authentication.impersonation-enable` | Whether to enable impersonation for this schema of the Hadoop catalog. 
| The parent(catalog) value | No | 0.6.0-incubating | | `authentication.type` | The type of authentication for this schema of Hadoop catalog , currently we only support `kerberos`, `simple`. | The parent(catalog) value | No | 0.6.0-incubating | | `authentication.kerberos.principal` | The principal of the Kerberos authentication for this schema. | The parent(catalog) value | No | 0.6.0-incubating | -| `authentication.kerberos.keytab-uri` | The URI of The keytab for the Kerberos authentication for this scheam. | The parent(catalog) value | No | 0.6.0-incubating | +| `authentication.kerberos.keytab-uri` | The URI of The keytab for the Kerberos authentication for this schema. | The parent(catalog) value | No | 0.6.0-incubating | +| `credential-providers` | The credential provider types, separated by comma. | (none) | No | 0.8.0-incubating | ### Schema operations @@ -158,6 +177,13 @@ Refer to [Schema operation](./manage-fileset-metadata-using-gravitino.md#schema- | `authentication.type` | The type of authentication for Hadoop catalog fileset, currently we only support `kerberos`, `simple`. | The parent(schema) value | No | 0.6.0-incubating | | `authentication.kerberos.principal` | The principal of the Kerberos authentication for the fileset. | The parent(schema) value | No | 0.6.0-incubating | | `authentication.kerberos.keytab-uri` | The URI of The keytab for the Kerberos authentication for the fileset. | The parent(schema) value | No | 0.6.0-incubating | +| `credential-providers` | The credential provider types, separated by comma. | (none) | No | 0.8.0-incubating | + +Credential providers can be specified in several places, as listed below. Gravitino checks the `credential-provider` setting in the following order of precedence: + +1. Fileset properties +2. Schema properties +3. 
Catalog properties ### Fileset operations diff --git a/docs/how-to-use-gvfs.md b/docs/how-to-use-gvfs.md index 6ac5079a6b3..d32ad3da672 100644 --- a/docs/how-to-use-gvfs.md +++ b/docs/how-to-use-gvfs.md @@ -514,7 +514,7 @@ fs = gvfs.GravitinoVirtualFileSystem(server_uri="http://localhost:8090", metalak :::note -Gravitino python client does not support customized filesets defined by users due to the limit of `fsspec` library. +Gravitino python client does not support [customized file systems](hadoop-catalog.md#how-to-custom-your-own-hcfs-file-system-fileset) defined by users due to the limit of `fsspec` library. ::: diff --git a/docs/iceberg-rest-service.md b/docs/iceberg-rest-service.md index f21ca35a43a..d42fc98b4dd 100644 --- a/docs/iceberg-rest-service.md +++ b/docs/iceberg-rest-service.md @@ -27,9 +27,9 @@ The Apache Gravitino Iceberg REST Server follows the [Apache Iceberg REST API sp ## Server management There are three deployment scenarios for Gravitino Iceberg REST server: -- A standalone server in a standalone Gravitino Iceberg REST server package. -- A standalone server in the Gravitino server package. -- An auxiliary service embedded in the Gravitino server. +- A standalone server in a standalone Gravitino Iceberg REST server package, the classpath is `libs`. +- A standalone server in the Gravitino server package, the classpath is `iceberg-rest-server/libs`. +- An auxiliary service embedded in the Gravitino server, the classpath is `iceberg-rest-server/libs`. For detailed instructions on how to build and install the Gravitino server package, please refer to [How to build](./how-to-build.md) and [How to install](./how-to-install.md). To build the Gravitino Iceberg REST server package, use the command `./gradlew compileIcebergRESTServer -x test`. Alternatively, to create the corresponding compressed package in the distribution directory, use `./gradlew assembleIcebergRESTServer -x test`. 
The Gravitino Iceberg REST server package includes the following files: @@ -100,29 +100,23 @@ The detailed configuration items are as follows: | `gravitino.iceberg-rest.authentication.kerberos.keytab-fetch-timeout-sec` | The fetch timeout of retrieving Kerberos keytab from `authentication.kerberos.keytab-uri`. | 60 | No | 0.7.0-incubating | +### Credential vending + +Please refer to [Credential vending](./security/credential-vending.md) for more details. + ### Storage #### S3 configuration -Gravitino Iceberg REST service supports using static S3 secret key or generating temporary token to access S3 data. - | Configuration item | Description | Default value | Required | Since Version | |----------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|------------------------------------------------|------------------| | `gravitino.iceberg-rest.io-impl` | The IO implementation for `FileIO` in Iceberg, use `org.apache.iceberg.aws.s3.S3FileIO` for S3. | (none) | No | 0.6.0-incubating | -| `gravitino.iceberg-rest.credential-provider-type` | Deprecated, please use `gravitino.iceberg-rest.credential-providers` instead. | (none) | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.credential-providers` | Supports `s3-token` and `s3-secret-key` for S3. `s3-token` generates a temporary token according to the query data path while `s3-secret-key` using the s3 secret access key to access S3 data. | (none) | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.s3-access-key-id` | The static access key ID used to access S3 data. | (none) | No | 0.6.0-incubating | -| `gravitino.iceberg-rest.s3-secret-access-key` | The static secret access key used to access S3 data. 
| (none) | No | 0.6.0-incubating | | `gravitino.iceberg-rest.s3-endpoint` | An alternative endpoint of the S3 service, This could be used for S3FileIO with any s3-compatible object storage service that has a different endpoint, or access a private S3 endpoint in a virtual private cloud. | (none) | No | 0.6.0-incubating | | `gravitino.iceberg-rest.s3-region` | The region of the S3 service, like `us-west-2`. | (none) | No | 0.6.0-incubating | -| `gravitino.iceberg-rest.s3-role-arn` | The ARN of the role to access the S3 data. | (none) | Yes, when `credential-providers` is `s3-token` | 0.7.0-incubating | -| `gravitino.iceberg-rest.s3-external-id` | The S3 external id to generate token, only used when `credential-providers` is `s3-token`. | (none) | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.s3-token-expire-in-secs` | The S3 session token expire time in secs, it couldn't exceed the max session time of the assumed role, only used when `credential-providers` is `s3-token`. | 3600 | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.s3-token-service-endpoint` | An alternative endpoint of the S3 token service, This could be used with s3-compatible object storage service like MINIO that has a different STS endpoint. | (none) | No | 0.8.0-incubating | For other Iceberg s3 properties not managed by Gravitino like `s3.sse.type`, you could config it directly by `gravitino.iceberg-rest.s3.sse.type`. -If you set `credential-providers` explicitly, please downloading [Gravitino AWS bundle jar](https://mvnrepository.com/artifact/org.apache.gravitino/aws-bundle), and place it to the classpath of Iceberg REST server. +Please refer to [S3 credentials](./security/credential-vending.md#s3-credentials) for credential related configurations. :::info To configure the JDBC catalog backend, set the `gravitino.iceberg-rest.warehouse` parameter to `s3://{bucket_name}/${prefix_name}`. 
For the Hive catalog backend, set `gravitino.iceberg-rest.warehouse` to `s3a://{bucket_name}/${prefix_name}`. Additionally, download the [Iceberg AWS bundle](https://mvnrepository.com/artifact/org.apache.iceberg/iceberg-aws-bundle) and place it in the classpath of Iceberg REST server. @@ -130,24 +124,15 @@ To configure the JDBC catalog backend, set the `gravitino.iceberg-rest.warehouse #### OSS configuration -Gravitino Iceberg REST service supports using static access-key-id and secret-access-key or generating temporary token to access OSS data. - | Configuration item | Description | Default value | Required | Since Version | |---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------|------------------------------------------------------|------------------| | `gravitino.iceberg-rest.io-impl` | The IO implementation for `FileIO` in Iceberg, use `org.apache.iceberg.aliyun.oss.OSSFileIO` for OSS. | (none) | No | 0.6.0-incubating | -| `gravitino.iceberg-rest.credential-provider-type` | Deprecated, please use `gravitino.iceberg-rest.credential-providers` instead. | (none) | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.credential-providers` | Supports `oss-token` and `oss-secret-key` for OSS. `oss-token` generates a temporary token according to the query data path while `oss-secret-key` using the oss secret access key to access S3 data. | (none) | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.oss-access-key-id` | The static access key ID used to access OSS data. | (none) | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.oss-secret-access-key` | The static secret access key used to access OSS data. | (none) | No | 0.7.0-incubating | | `gravitino.iceberg-rest.oss-endpoint` | The endpoint of Aliyun OSS service. 
| (none) | No | 0.7.0-incubating | | `gravitino.iceberg-rest.oss-region` | The region of the OSS service, like `oss-cn-hangzhou`, only used when `credential-providers` is `oss-token`. | (none) | No | 0.8.0-incubating | -| `gravitino.iceberg-rest.oss-role-arn` | The ARN of the role to access the OSS data, only used when `credential-providers` is `oss-token`. | (none) | Yes, when `credential-provider-type` is `oss-token`. | 0.8.0-incubating | -| `gravitino.iceberg-rest.oss-external-id` | The OSS external id to generate token, only used when `credential-providers` is `oss-token`. | (none) | No | 0.8.0-incubating | -| `gravitino.iceberg-rest.oss-token-expire-in-secs` | The OSS security token expire time in secs, only used when `credential-providers` is `oss-token`. | 3600 | No | 0.8.0-incubating | For other Iceberg OSS properties not managed by Gravitino like `client.security-token`, you could config it directly by `gravitino.iceberg-rest.client.security-token`. -If you set `credential-providers` explicitly, please downloading [Gravitino Aliyun bundle jar](https://mvnrepository.com/artifact/org.apache.gravitino/aliyun-bundle), and place it to the classpath of Iceberg REST server. +Please refer to [OSS credentials](./security/credential-vending.md#oss-credentials) for credential related configurations. :::info Please set the `gravitino.iceberg-rest.warehouse` parameter to `oss://{bucket_name}/${prefix_name}`. Additionally, download the [Aliyun OSS SDK](https://gosspublic.alicdn.com/sdks/java/aliyun_java_sdk_3.10.2.zip) and copy `aliyun-sdk-oss-3.10.2.jar`, `hamcrest-core-1.1.jar`, `jdom2-2.0.6.jar` in the classpath of Iceberg REST server, `iceberg-rest-server/libs` for the auxiliary server, `libs` for the standalone server. 
@@ -160,16 +145,14 @@ Supports using static GCS credential file or generating GCS token to access GCS | Configuration item | Description | Default value | Required | Since Version | |---------------------------------------------------|----------------------------------------------------------------------------------------------------|---------------|----------|------------------| | `gravitino.iceberg-rest.io-impl` | The io implementation for `FileIO` in Iceberg, use `org.apache.iceberg.gcp.gcs.GCSFileIO` for GCS. | (none) | No | 0.6.0-incubating | -| `gravitino.iceberg-rest.credential-provider-type` | Deprecated, please use `gravitino.iceberg-rest.credential-providers` instead. | (none) | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.credential-providers` | Supports `gcs-token`, generates a temporary token according to the query data path. | (none) | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.gcs-credential-file-path` | Deprecated, please use `gravitino.iceberg-rest.gcs-service-account-file` instead. | (none) | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.gcs-service-account-file` | The location of GCS credential file, only used when `credential-provider-type` is `gcs-token`. | (none) | No | 0.8.0-incubating | For other Iceberg GCS properties not managed by Gravitino like `gcs.project-id`, you could config it directly by `gravitino.iceberg-rest.gcs.project-id`. -If you set `credential-providers` explicitly, please downloading [Gravitino GCP bundle jar](https://mvnrepository.com/artifact/org.apache.gravitino/gcp-bundle), and place it to the classpath of Iceberg REST server. +Please refer to [GCS credentials](./security/credential-vending.md#gcs-credentials) for credential related configurations. -Please make sure the credential file is accessible by Gravitino, like using `export GOOGLE_APPLICATION_CREDENTIALS=/xx/application_default_credentials.json` before Gravitino Iceberg REST server is started. 
+:::note +Please ensure that the credential file can be accessed by the Gravitino server. For example, if the server is running on a GCE machine, or you can set the environment variable as `export GOOGLE_APPLICATION_CREDENTIALS=/xx/application_default_credentials.json`, even when the `gcs-service-account-file` has already been configured. +::: :::info Please set `gravitino.iceberg-rest.warehouse` to `gs://{bucket_name}/${prefix_name}`, and download [Iceberg gcp bundle](https://mvnrepository.com/artifact/org.apache.iceberg/iceberg-gcp-bundle) and place it to the classpath of Gravitino Iceberg REST server, `iceberg-rest-server/libs` for the auxiliary server, `libs` for the standalone server. @@ -177,23 +160,13 @@ Please set `gravitino.iceberg-rest.warehouse` to `gs://{bucket_name}/${prefix_na #### ADLS -Gravitino Iceberg REST service supports generating SAS token to access ADLS data. - | Configuration item | Description | Default value | Required | Since Version | |-----------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|------------------| | `gravitino.iceberg-rest.io-impl` | The IO implementation for `FileIO` in Iceberg, use `org.apache.iceberg.azure.adlsv2.ADLSFileIO` for ADLS. | (none) | Yes | 0.8.0-incubating | -| `gravitino.iceberg-rest.credential-provider-type` | Deprecated, please use `gravitino.iceberg-rest.credential-providers` instead. | (none) | No | 0.7.0-incubating | -| `gravitino.iceberg-rest.credential-providers` | Supports `adls-token` and `azure-account-key`. `adls-token` generates a temporary token according to the query data path while `azure-account-key` uses a storage account key to access ADLS data. 
| (none) | Yes | 0.8.0-incubating | -| `gravitino.iceberg-rest.azure-storage-account-name` | The static storage account name used to access ADLS data. | (none) | Yes | 0.8.0-incubating | -| `gravitino.iceberg-rest.azure-storage-account-key` | The static storage account key used to access ADLS data. | (none) | Yes | 0.8.0-incubating | -| `gravitino.iceberg-rest.azure-tenant-id` | Azure Active Directory (AAD) tenant ID, only used when `credential-providers` is `adls-token`. | (none) | Yes | 0.8.0-incubating | -| `gravitino.iceberg-rest.azure-client-id` | Azure Active Directory (AAD) client ID used for authentication, only used when `credential-providers` is `adls-token`. | (none) | Yes | 0.8.0-incubating | -| `gravitino.iceberg-rest.azure-client-secret` | Azure Active Directory (AAD) client secret used for authentication, only used when `credential-providers` is `adls-token`. | (none) | Yes | 0.8.0-incubating | -| `gravitino.iceberg-rest.adls-token-expire-in-secs` | The ADLS SAS token expire time in secs, only used when `credential-providers` is `adls-token`. | 3600 | No | 0.8.0-incubating | For other Iceberg ADLS properties not managed by Gravitino like `adls.read.block-size-bytes`, you could config it directly by `gravitino.iceberg-rest.adls.read.block-size-bytes`. -If you set `credential-providers` explicitly, please downloading [Gravitino Azure bundle jar](https://mvnrepository.com/artifact/org.apache.gravitino/azure-bundle), and place it to the classpath of Iceberg REST server. +Please refer to [ADLS credentials](./security/credential-vending.md#adls-credentials) for credential related configurations. :::info Please set `gravitino.iceberg-rest.warehouse` to `abfs[s]://{container-name}@{storage-account-name}.dfs.core.windows.net/{path}`, and download the [Iceberg Azure bundle](https://mvnrepository.com/artifact/org.apache.iceberg/iceberg-azure-bundle) and place it in the classpath of Iceberg REST server. 
@@ -441,7 +414,7 @@ SELECT * FROM dml.test; You could run Gravitino Iceberg REST server though docker container: ```shell -docker run -d -p 9001:9001 apache/gravitino-iceberg-rest:0.7.0-incubating +docker run -d -p 9001:9001 apache/gravitino-iceberg-rest:0.8.0-incubating ``` Gravitino Iceberg REST server in docker image could access local storage by default, you could set the following environment variables if the storage is cloud/remote storage like S3, please refer to [storage section](#storage) for more details. @@ -464,6 +437,12 @@ Gravitino Iceberg REST server in docker image could access local storage by defa | `GRAVITINO_AZURE_TENANT_ID` | `gravitino.iceberg-rest.azure-tenant-id` | 0.8.0-incubating | | `GRAVITINO_AZURE_CLIENT_ID` | `gravitino.iceberg-rest.azure-client-id` | 0.8.0-incubating | | `GRAVITINO_AZURE_CLIENT_SECRET` | `gravitino.iceberg-rest.azure-client-secret` | 0.8.0-incubating | +| `GRAVITINO_OSS_ACCESS_KEY` | `gravitino.iceberg-rest.oss-access-key-id` | 0.8.0-incubating | +| `GRAVITINO_OSS_SECRET_KEY` | `gravitino.iceberg-rest.oss-secret-access-key` | 0.8.0-incubating | +| `GRAVITINO_OSS_ENDPOINT` | `gravitino.iceberg-rest.oss-endpoint` | 0.8.0-incubating | +| `GRAVITINO_OSS_REGION` | `gravitino.iceberg-rest.oss-region` | 0.8.0-incubating | +| `GRAVITINO_OSS_ROLE_ARN` | `gravitino.iceberg-rest.oss-role-arn` | 0.8.0-incubating | +| `GRAVITINO_OSS_EXTERNAL_ID` | `gravitino.iceberg-rest.oss-external-id` | 0.8.0-incubating | The below environment is deprecated, please use the corresponding configuration items instead. diff --git a/docs/index.md b/docs/index.md index 2bc4d53b3f6..401e6c1d0a9 100644 --- a/docs/index.md +++ b/docs/index.md @@ -56,6 +56,8 @@ REST API and the Java SDK. You can use either to manage metadata. See how to manage fileset metadata. * [Manage messaging metadata using Gravitino](./manage-messaging-metadata-using-gravitino.md) to learn how to manage messaging metadata. 
+* [Manage model metadata using Gravitino](./manage-model-metadata-using-gravitino.md) to learn how to manage + model metadata. Also, you can find the complete REST API definition in [Gravitino Open API](./api/rest/gravitino-rest-api), @@ -88,6 +90,10 @@ Gravitino currently supports the following catalogs: * [**Kafka catalog**](./kafka-catalog.md) +**Model catalogs:** + +* [**Model catalog**](./model-catalog.md) + ## Apache Gravitino playground To experience Gravitino with other components easily, Gravitino provides a playground to run. It @@ -119,6 +125,7 @@ Gravitino supports different catalogs to manage the metadata in different source * [Hadoop catalog](./hadoop-catalog.md): a complete guide to using Gravitino to manage fileset using Hadoop Compatible File System (HCFS). * [Kafka catalog](./kafka-catalog.md): a complete guide to using Gravitino to manage Kafka topics metadata. +* [Model catalog](./model-catalog.md): a complete guide to using Gravitino to manage model metadata. ### Governance diff --git a/docs/kafka-catalog.md b/docs/kafka-catalog.md index 0c32bc59b76..b9901ec9d70 100644 --- a/docs/kafka-catalog.md +++ b/docs/kafka-catalog.md @@ -15,7 +15,7 @@ One Kafka catalog corresponds to one Kafka cluster. 
### Catalog properties -Besides the [common catalog properties](./gravitino-server-config.md#gravitino-catalog-properties-configuration), the Kafka catalog has the following properties: +Besides the [common catalog properties](./gravitino-server-config.md#apache-gravitino-catalog-properties-configuration), the Kafka catalog has the following properties: | Property Name | Description | Default Value | Required | Since Version | |---------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|---------------| diff --git a/docs/manage-metalake-using-gravitino.md b/docs/manage-metalake-using-gravitino.md index 09d5c2dc633..ae15bbbb15e 100644 --- a/docs/manage-metalake-using-gravitino.md +++ b/docs/manage-metalake-using-gravitino.md @@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem'; This page introduces how to create, modify, view, and delete [metalakes](./glossary.md#metalake) by using Gravitino. -## Prerequsites +## Prerequisites You have installed and launched Gravitino. For more details, see [Get started](./getting-started.md). diff --git a/docs/manage-model-metadata-using-gravitino.md b/docs/manage-model-metadata-using-gravitino.md new file mode 100644 index 00000000000..519f79b7f7a --- /dev/null +++ b/docs/manage-model-metadata-using-gravitino.md @@ -0,0 +1,637 @@ +--- +title: Manage model metadata using Gravitino +slug: /manage-model-metadata-using-gravitino +date: 2024-12-26 +keyword: Gravitino model metadata manage +license: This software is licensed under the Apache License version 2. +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This page introduces how to manage model metadata in Apache Gravitino. 
Gravitino model catalog +is a kind of model registry, which provides the ability to manage machine learning models' +versioned metadata. It follows the typical Gravitino 3-level namespace (catalog, schema, and +model) and supports managing the versions for each model. + +Currently, it supports model and model version registering, listing, loading, and deleting. + +To use the model catalog, please make sure that: + + - The Gravitino server has started, and is serving at, e.g. [http://localhost:8090](http://localhost:8090). + - A metalake has been created and [enabled](./manage-metalake-using-gravitino.md#enable-a-metalake) + +## Catalog operations + +### Create a catalog + +:::info +For a model catalog, you must specify the catalog `type` as `MODEL` when creating the catalog. +Please also be aware that the `provider` is not required for a model catalog. +::: + +You can create a catalog by sending a `POST` request to the `/api/metalakes/{metalake_name}/catalogs` +endpoint or just use the Gravitino Java/Python client. 
The following is an example of creating a +catalog: + + + + +```shell +curl -X POST -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" -d '{ + "name": "model_catalog", + "type": "MODEL", + "comment": "This is a model catalog", + "properties": { + "k1": "v1" + } +}' http://localhost:8090/api/metalakes/example/catalogs +``` + + + + +```java +GravitinoClient gravitinoClient = GravitinoClient + .builder("http://localhost:8090") + .withMetalake("example") + .build(); + +Map properties = ImmutableMap.builder() + .put("k1", "v1") + .build(); + +Catalog catalog = gravitinoClient.createCatalog( + "model_catalog", + Type.MODEL, + "This is a model catalog", + properties); +``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") +catalog = gravitino_client.create_catalog(name="model_catalog", + type=Catalog.Type.MODEL, + provider=None, + comment="This is a model catalog", + properties={"k1": "v1"}) +``` + + + + +### Load a catalog + +Refer to [Load a catalog](./manage-relational-metadata-using-gravitino.md#load-a-catalog) +in relational catalog for more details. For a model catalog, the load operation is the same. + +### Alter a catalog + +Refer to [Alter a catalog](./manage-relational-metadata-using-gravitino.md#alter-a-catalog) +in relational catalog for more details. For a model catalog, the alter operation is the same. + +### Drop a catalog + +Refer to [Drop a catalog](./manage-relational-metadata-using-gravitino.md#drop-a-catalog) +in relational catalog for more details. For a model catalog, the drop operation is the same. + +### List all catalogs in a metalake + +Please refer to [List all catalogs in a metalake](./manage-relational-metadata-using-gravitino.md#list-all-catalogs-in-a-metalake) +in relational catalog for more details. For a model catalog, the list operation is the same. 
+ +### List all catalogs' information in a metalake + +Please refer to [List all catalogs' information in a metalake](./manage-relational-metadata-using-gravitino.md#list-all-catalogs-information-in-a-metalake) +in relational catalog for more details. For a model catalog, the list operation is the same. + +## Schema operations + +`Schema` is a virtual namespace in a model catalog, which is used to organize the models. It +is similar to the concept of `schema` in the relational catalog. + +:::tip +Users should create a metalake and a catalog before creating a schema. +::: + +### Create a schema + +You can create a schema by sending a `POST` request to the `/api/metalakes/{metalake_name}/catalogs/{catalog_name}/schemas` +endpoint or just use the Gravitino Java/Python client. The following is an example of creating a +schema: + + + + +```shell +curl -X POST -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" -d '{ + "name": "model_schema", + "comment": "This is a model schema", + "properties": { + "k1": "v1" + } +}' http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas +``` + + + + +```java +GravitinoClient gravitinoClient = GravitinoClient + .builder("http://localhost:8090") + .withMetalake("example") + .build(); + +Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); + +SupportsSchemas supportsSchemas = catalog.asSchemas(); + +Map schemaProperties = ImmutableMap.builder() + .put("k1", "v1") + .build(); +Schema schema = supportsSchemas.createSchema( + "model_schema", + "This is a schema", + schemaProperties); +// ... 
+``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +catalog.as_schemas().create_schema(name="model_schema", + comment="This is a schema", + properties={"k1": "v1"}) +``` + + + + +### Load a schema + +Please refer to [Load a schema](./manage-relational-metadata-using-gravitino.md#load-a-schema) +in relational catalog for more details. For a model catalog, the schema load operation is the +same. + +### Alter a schema + +Please refer to [Alter a schema](./manage-relational-metadata-using-gravitino.md#alter-a-schema) +in relational catalog for more details. For a model catalog, the schema alter operation is the +same. + +### Drop a schema + +Please refer to [Drop a schema](./manage-relational-metadata-using-gravitino.md#drop-a-schema) +in relational catalog for more details. For a model catalog, the schema drop operation is the +same. + +Note that the drop operation will delete all the model metadata under this schema if `cascade` is +set to `true`. + +### List all schemas under a catalog + +Please refer to [List all schemas under a catalog](./manage-relational-metadata-using-gravitino.md#list-all-schemas-under-a-catalog) +in relational catalog for more details. For a model catalog, the schema list operation is the +same. + +## Model operations + +:::tip + - Users should create a metalake, a catalog, and a schema before creating a model. +::: + +### Register a model + +You can register a model by sending a `POST` request to the `/api/metalakes/{metalake_name} +/catalogs/{catalog_name}/schemas/{schema_name}/models` endpoint or just use the Gravitino +Java/Python client. 
The following is an example of creating a model: + + + + +```shell +curl -X POST -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" -d '{ + "name": "example_model", + "comment": "This is an example model", + "properties": { + "k1": "v1" + } +}' http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas/model_schema/models +``` + + + + +```java +GravitinoClient gravitinoClient = GravitinoClient + .builder("http://localhost:8090") + .withMetalake("example") + .build(); + +Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); +Map propertiesMap = ImmutableMap.builder() + .put("k1", "v1") + .build(); + +Model model = catalog.asModelCatalog().registerModel( + NameIdentifier.of("model_schema", "example_model"), + "This is an example model", + propertiesMap); +``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +model: Model = catalog.as_model_catalog().register_model(ident=NameIdentifier.of("model_schema", "example_model"), + comment="This is an example model", + properties={"k1": "v1"}) +``` + + + + +### Get a model + +You can get a model by sending a `GET` request to the `/api/metalakes/{metalake_name} +/catalogs/{catalog_name}/schemas/{schema_name}/models/{model_name}` endpoint or by using the +Gravitino Java/Python client. The following is an example of getting a model: + + + + +```shell +curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" \ +http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas/model_schema/models/example_model +``` + + + + +```java +// ... +Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); +Model model = catalog.asModelCatalog().getModel(NameIdentifier.of("model_schema", "example_model")); +// ... 
+``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +model: Model = catalog.as_model_catalog().get_model(ident=NameIdentifier.of("model_schema", "example_model")) +``` + + + + +### Delete a model + +You can delete a model by sending a `DELETE` request to the `/api/metalakes/{metalake_name} +/catalogs/{catalog_name}/schemas/{schema_name}/models/{model_name}` endpoint or by using the +Gravitino Java/Python client. The following is an example of deleting a model: + + + + +```shell +curl -X DELETE -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" \ +http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas/model_schema/models/example_model +``` + + + + +```java +// ... +Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); +catalog.asModelCatalog().deleteModel(NameIdentifier.of("model_schema", "example_model")); +// ... +``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +catalog.as_model_catalog().delete_model(NameIdentifier.of("model_schema", "example_model")) +``` + + + + +Note that the delete operation will delete all the model versions under this model. + +### List models + +You can list all the models in a schema by sending a `GET` request to the `/api/metalakes/ +{metalake_name}/catalogs/{catalog_name}/schemas/{schema_name}/models` endpoint or by using the +Gravitino Java/Python client. The following is an example of listing all the models in a schema: + + + + +```shell +curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" \ +http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas/model_schema/models +``` + + + + +```java +// ... 
+Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); +NameIdentifier[] identifiers = catalog.asModelCatalog().listModels(Namespace.of("model_schema")); +// ... +``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +model_list = catalog.as_model_catalog().list_models(namespace=Namespace.of("model_schema")) +``` + + + + +## ModelVersion operations + +:::tip + - Users should create a metalake, a catalog, a schema, and a model before linking a model version + to the model. +::: + +### Link a ModelVersion + +You can link a ModelVersion by sending a `POST` request to the `/api/metalakes/{metalake_name} +/catalogs/{catalog_name}/schemas/{schema_name}/models/{model_name}/versions` endpoint or by using +the Gravitino Java/Python client. The following is an example of linking a ModelVersion: + + + + +```shell +curl -X POST -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" -d '{ + "uri": "path/to/model", + "aliases": ["alias1", "alias2"], + "comment": "This is version 0", + "properties": { + "k1": "v1" + } +}' http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas/model_schema/models/example_model/versions +``` + + + + +```java +// ... 
+Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); +catalog.asModelCatalog().linkModelVersion( + NameIdentifier.of("model_schema", "example_model"), + "path/to/model", + new String[] {"alias1", "alias2"}, + "This is version 0", + ImmutableMap.of("k1", "v1")); +``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +catalog.as_model_catalog().link_model_version(model_ident=NameIdentifier.of("model_schema", "example_model"), + uri="path/to/model", + aliases=["alias1", "alias2"], + comment="This is version 0", + properties={"k1": "v1"}) +``` + + + + +The comment and properties of ModelVersion can be different from the model. + +### Get a ModelVersion + +You can get a ModelVersion by sending a `GET` request to the `/api/metalakes/{metalake_name} +/catalogs/{catalog_name}/schemas/{schema_name}/models/{model_name}/versions/{version_number}` +endpoint or by using the Gravitino Java/Python client. The following is an example of getting +a ModelVersion: + + + + +```shell +curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" \ +http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas/model_schema/models/example_model/versions/0 +``` + + + + +```java +// ... +Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); +catalog.asModelCatalog().getModelVersion(NameIdentifier.of("model_schema", "example_model"), 0); +// ... 
+``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +catalog.as_model_catalog().get_model_version(model_ident=NameIdentifier.of("model_schema", "example_model"), version=0) +``` + + + + +### Get a ModelVersion by alias + +You can also get a ModelVersion by sending a `GET` request to the `/api/metalakes/{metalake_name} +/catalogs/{catalog_name}/schemas/{schema_name}/models/{model_name}/aliases/{alias}` endpoint or +by using the Gravitino Java/Python client. The following is an example of getting a ModelVersion +by alias: + + + + +```shell +curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" \ +http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas/model_schema/models/example_model/aliases/alias1 +``` + + + + +```java +// ... +Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); +ModelVersion modelVersion = catalog.asModelCatalog().getModelVersion(NameIdentifier.of("model_schema", "example_model"), "alias1"); +// ... +``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +model_version: ModelVersion = catalog.as_model_catalog().get_model_version_by_alias(model_ident=NameIdentifier.of("model_schema", "example_model"), alias="alias1") +``` + + + + +### Delete a ModelVersion + +You can delete a ModelVersion by sending a `DELETE` request to the `/api/metalakes/{metalake_name} +/catalogs/{catalog_name}/schemas/{schema_name}/models/{model_name}/versions/{version_number}` +endpoint or by using the Gravitino Java/Python client. 
The following is an example of deleting +a ModelVersion: + + + + +```shell +curl -X DELETE -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" \ +http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas/model_schema/models/example_model/versions/0 +``` + + + + +```java +// ... +Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); +catalog.asModelCatalog().deleteModelVersion(NameIdentifier.of("model_schema", "example_model"), 0); +// ... +``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +catalog.as_model_catalog().delete_model_version(model_ident=NameIdentifier.of("model_schema", "example_model"), version=0) +``` + + + + +### Delete a ModelVersion by alias + +You can also delete a ModelVersion by sending a `DELETE` request to the `/api/metalakes/ +{metalake_name}/catalogs/{catalog_name}/schemas/{schema_name}/models/{model_name}/aliases/{alias}` endpoint or +by using the Gravitino Java/Python client. The following is an example of deleting a ModelVersion +by alias: + + + + +```shell +curl -X DELETE -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" \ +http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas/model_schema/models/example_model/aliases/alias1 +``` + + + + +```java +// ... +Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); +catalog.asModelCatalog().deleteModelVersion(NameIdentifier.of("model_schema", "example_model"), "alias1"); +// ... 
+``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +catalog.as_model_catalog().delete_model_version_by_alias(model_ident=NameIdentifier.of("model_schema", "example_model"), alias="alias1") +``` + + + + +### List ModelVersions + +You can list all the ModelVersions in a model by sending a `GET` request to the `/api/metalakes/ +{metalake_name}/catalogs/{catalog_name}/schemas/{schema_name}/models/{model_name}/versions` endpoint +or by using the Gravitino Java/Python client. The following is an example of listing all the +ModelVersions in a model: + + + + +```shell +curl -X GET -H "Accept: application/vnd.gravitino.v1+json" \ +-H "Content-Type: application/json" \ +http://localhost:8090/api/metalakes/example/catalogs/model_catalog/schemas/model_schema/models/example_model/versions +``` + + + + +```java +// ... +Catalog catalog = gravitinoClient.loadCatalog("model_catalog"); +int[] modelVersions = catalog.asModelCatalog().listModelVersions(NameIdentifier.of("model_schema", "example_model")); +// ... +``` + + + + +```python +gravitino_client: GravitinoClient = GravitinoClient(uri="http://localhost:8090", metalake_name="example") + +catalog: Catalog = gravitino_client.load_catalog(name="model_catalog") +model_versions: List[int] = catalog.as_model_catalog().list_model_versions(model_ident=NameIdentifier.of("model_schema", "example_model")) +``` + + + diff --git a/docs/model-catalog.md b/docs/model-catalog.md new file mode 100644 index 00000000000..a9da0c8b3f6 --- /dev/null +++ b/docs/model-catalog.md @@ -0,0 +1,87 @@ +--- +title: "Model catalog" +slug: /model-catalog +date: 2024-12-26 +keyword: model catalog +license: "This software is licensed under the Apache License version 2." 
+--- + +## Introduction + +A Model catalog is a metadata catalog that provides the unified interface to manage the metadata of +machine learning models in a centralized way. It follows the typical Gravitino 3-level namespace +(catalog, schema, and model) to manage the ML models metadata. In addition, it supports +managing the versions for each model. + +The advantages of using model catalog are: + +* Centralized management of ML models with user defined namespaces. Users can better discover + and govern the models from semantic level, rather than managing the model files directly. +* Version management for each model. Users can easily track the model versions and manage the + model lifecycle. + +The key concept of model management is to manage the path (URI) of the model. Instead of +managing the model storage path physically and separately, model metadata defines the mapping +relation between the model name and the storage path. In the meantime, with the support of +extensible properties of model metadata, users can define the model metadata with more detailed information +rather than just the storage path. + +* **Model**: A model is a metadata object defined in the model catalog, to manage an ML model. Each + model can have many **Model Versions**, and each version can have its own properties. Models + can be retrieved by the name. +* **ModelVersion**: The model version is a metadata defined in the model catalog, to manage each + version of the ML model. Each version has a unique version number, and can have its own + properties and storage path. ModelVersion can be retrieved by the model name and version + number. Also, each version can have a list of aliases, which can also be used to retrieve. + +## Catalog + +### Catalog properties + +A Model catalog doesn't have specific properties. It uses the [common catalog properties](./gravitino-server-config.md#apache-gravitino-catalog-properties-configuration).
+ +### Catalog operations + +Refer to [Catalog operations](./manage-model-metadata-using-gravitino.md#catalog-operations) for more details. + +## Schema + +### Schema capabilities + +Schema is the second level of the model catalog namespace, the model catalog supports creating, updating, deleting, and listing schemas. + +### Schema properties + +Schema in the model catalog doesn't have predefined properties. Users can define the properties for each schema. + +### Schema operations + +Refer to [Schema operation](./manage-model-metadata-using-gravitino.md#schema-operations) for more details. + +## Model + +### Model capabilities + +The Model catalog supports registering, listing and deleting models and model versions. + +### Model properties + +Model doesn't have predefined properties. Users can define the properties for each model and model version. + +### Model operations + +Refer to [Model operation](./manage-model-metadata-using-gravitino.md#model-operations) for more details. + +## ModelVersion + +### ModelVersion capabilities + +The Model catalog supports linking, listing and deleting model versions. + +### ModelVersion properties + +ModelVersion doesn't have predefined properties. Users can define the properties for each version. + +### ModelVersion operations + +Refer to [ModelVersion operation](./manage-model-metadata-using-gravitino.md#model-version-operations) for more details. 
diff --git a/docs/open-api/catalogs.yaml b/docs/open-api/catalogs.yaml index 0096944f27f..9e4efdaf588 100644 --- a/docs/open-api/catalogs.yaml +++ b/docs/open-api/catalogs.yaml @@ -291,6 +291,7 @@ components: - hive - lakehouse-iceberg - lakehouse-paimon + - lakehouse-hudi - jdbc-mysql - jdbc-postgresql - jdbc-doris diff --git a/docs/open-api/credentials.yaml b/docs/open-api/credentials.yaml new file mode 100644 index 00000000000..4f5106c3964 --- /dev/null +++ b/docs/open-api/credentials.yaml @@ -0,0 +1,119 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +--- + +paths: + + /metalakes/{metalake}/objects/{metadataObjectType}/{metadataObjectFullName}/credentials: + parameters: + - $ref: "./openapi.yaml#/components/parameters/metalake" + - $ref: "./openapi.yaml#/components/parameters/metadataObjectType" + - $ref: "./openapi.yaml#/components/parameters/metadataObjectFullName" + get: + tags: + - credentials + summary: Get credentials + operationId: getCredentials + responses: + "200": + description: Returns the list of credential objects associated with specified metadata object. 
+ content: + application/vnd.gravitino.v1+json: + schema: + $ref: "#/components/responses/CredentialResponse" + examples: + CredentialResponse: + $ref: "#/components/examples/CredentialResponse" + "400": + $ref: "./openapi.yaml#/components/responses/BadRequestErrorResponse" + "404": + description: Not Found - The specified metalake does not exist + content: + application/vnd.gravitino.v1+json: + schema: + $ref: "./openapi.yaml#/components/schemas/ErrorModel" + examples: + NoSuchMetalakeException: + $ref: "./metalakes.yaml#/components/examples/NoSuchMetalakeException" + "5xx": + $ref: "./openapi.yaml#/components/responses/ServerErrorResponse" + + +components: + schemas: + Credential: + type: object + description: A credential + required: + - credentialType + - expireTimeInMs + - credentialInfo + properties: + credentialType: + type: string + description: The type of the credential, for example, s3-token, s3-secret-key, oss-token, oss-secret-key, gcs-token, adls-token, azure-account-key, etc. + expireTimeInMs: + type: integer + description: The expiration time of the credential in milliseconds since the epoch, 0 means not expire. + credentialInfo: + type: object + description: The specific information of the credential. 
+ default: { } + additionalProperties: + type: string + + responses: + CredentialResponse: + type: object + properties: + code: + type: integer + format: int32 + description: Status code of the response + enum: + - 0 + credentials: + type: array + description: A list of credential objects + items: + $ref: "#/components/schemas/Credential" + + examples: + CredentialResponse: + value: { + "code": 0, + "credentials": [ + { + "credentialType": "s3-token", + "expireTimeInMs": 1735891948411, + "credentialInfo": { + "s3-access-key-id": "value1", + "s3-secret-access-key": "value2", + "s3-session-token": "value3" + } + }, + { + "credentialType": "s3-secret-key", + "expireTimeInMs": 0, + "credentialInfo": { + "s3-access-key-id": "value1", + "s3-secret-access-key": "value2" + } + }, + ] + } \ No newline at end of file diff --git a/docs/open-api/models.yaml b/docs/open-api/models.yaml index 713a7037cd6..652923286b3 100644 --- a/docs/open-api/models.yaml +++ b/docs/open-api/models.yaml @@ -122,6 +122,33 @@ paths: "5xx": $ref: "./openapi.yaml#/components/responses/ServerErrorResponse" + /metalakes/{metalake}/catalogs/{catalog}/schemas/{schema}/models/{model}/versions: + parameters: + - $ref: "./openapi.yaml#/components/parameters/metalake" + - $ref: "./openapi.yaml#/components/parameters/catalog" + - $ref: "./openapi.yaml#/components/parameters/schema" + - $ref: "./openapi.yaml#/components/parameters/model" + + get: + tags: + - model + summary: List model versions + operationId: listModelVersions + responses: + "200": + $ref: "#/components/responses/ModelVersionListResponse" + "404": + description: Not Found - The target model does not exist + content: + application/vnd.gravitino.v1+json: + schema: + $ref: "./openapi.yaml#/components/schemas/ErrorModel" + examples: + NoSuchModelException: + $ref: "#/components/examples/NoSuchModelException" + "5xx": + $ref: "./openapi.yaml#/components/responses/ServerErrorResponse" + post: tags: - model @@ -159,33 +186,6 @@ paths: "5xx": $ref: 
"./openapi.yaml#/components/responses/ServerErrorResponse" - /metalakes/{metalake}/catalogs/{catalog}/schemas/{schema}/models/{model}/versions: - parameters: - - $ref: "./openapi.yaml#/components/parameters/metalake" - - $ref: "./openapi.yaml#/components/parameters/catalog" - - $ref: "./openapi.yaml#/components/parameters/schema" - - $ref: "./openapi.yaml#/components/parameters/model" - - get: - tags: - - model - summary: List model versions - operationId: listModelVersions - responses: - "200": - $ref: "#/components/responses/ModelVersionListResponse" - "404": - description: Not Found - The target model does not exist - content: - application/vnd.gravitino.v1+json: - schema: - $ref: "./openapi.yaml#/components/schemas/ErrorModel" - examples: - NoSuchModelException: - $ref: "#/components/examples/NoSuchModelException" - "5xx": - $ref: "./openapi.yaml#/components/responses/ServerErrorResponse" - /metalakes/{metalake}/catalogs/{catalog}/schemas/{schema}/models/{model}/versions/{version}: parameters: - $ref: "./openapi.yaml#/components/parameters/metalake" diff --git a/docs/open-api/openapi.yaml b/docs/open-api/openapi.yaml index d0c941ab471..f39a90f55f5 100644 --- a/docs/open-api/openapi.yaml +++ b/docs/open-api/openapi.yaml @@ -68,6 +68,9 @@ paths: /metalakes/{metalake}/objects/{metadataObjectType}/{metadataObjectFullName}/roles: $ref: "./roles.yaml#/paths/~1metalakes~1%7Bmetalake%7D~1objects~1%7BmetadataObjectType%7D~1%7BmetadataObjectFullName%7D~1roles" + /metalakes/{metalake}/objects/{metadataObjectType}/{metadataObjectFullName}/credentials: + $ref: "./credentials.yaml#/paths/~1metalakes~1%7Bmetalake%7D~1objects~1%7BmetadataObjectType%7D~1%7BmetadataObjectFullName%7D~1credentials" + /metalakes/{metalake}/objects/{metadataObjectType}/{metadataObjectFullName}/tags/{tag}: $ref: "./tags.yaml#/paths/~1metalakes~1%7Bmetalake%7D~1objects~1%7BmetadataObjectType%7D~1%7BmetadataObjectFullName%7D~1tags~1%7Btag%7D" diff --git a/docs/open-api/roles.yaml 
b/docs/open-api/roles.yaml index 986d0fdc6f1..5ce9c26eec5 100644 --- a/docs/open-api/roles.yaml +++ b/docs/open-api/roles.yaml @@ -148,7 +148,7 @@ paths: /metalakes/{metalake}/objects/{metadataObjectType}/{metadataObjectFullName}/roles: parameters: - $ref: "./openapi.yaml#/components/parameters/metalake" - - $ref: "./openapi.yaml#/components/parameters/metadataObjectType" + - $ref: "#/components/parameters/metadataObjectTypeOfRole" - $ref: "./openapi.yaml#/components/parameters/metadataObjectFullName" get: @@ -386,4 +386,19 @@ components: value: { "code": 0, "names": [ "user1", "user2" ] - } \ No newline at end of file + } + parameters: + metadataObjectTypeOfRole: + name: metadataObjectType + in: path + description: The type of the metadata object + required: true + schema: + type: string + enum: + - "METALAKE" + - "CATALOG" + - "SCHEMA" + - "TABLE" + - "FILESET" + - "TOPIC" \ No newline at end of file diff --git a/docs/open-api/tags.yaml b/docs/open-api/tags.yaml index 7b8deef2520..a3be5230b94 100644 --- a/docs/open-api/tags.yaml +++ b/docs/open-api/tags.yaml @@ -206,6 +206,15 @@ paths: $ref: "#/components/examples/TagListResponse" "400": $ref: "./openapi.yaml#/components/responses/BadRequestErrorResponse" + "404": + description: Not Found - The specified metalake does not exist + content: + application/vnd.gravitino.v1+json: + schema: + $ref: "./openapi.yaml#/components/schemas/ErrorModel" + examples: + NoSuchMetalakeException: + $ref: "./metalakes.yaml#/components/examples/NoSuchMetalakeException" "5xx": $ref: "./openapi.yaml#/components/responses/ServerErrorResponse" @@ -233,6 +242,15 @@ paths: examples: NameListResponse: $ref: "#/components/examples/NameListResponse" + "404": + description: Not Found - The specified metalake does not exist + content: + application/vnd.gravitino.v1+json: + schema: + $ref: "./openapi.yaml#/components/schemas/ErrorModel" + examples: + NoSuchMetalakeException: + $ref: "./metalakes.yaml#/components/examples/NoSuchMetalakeException" 
"409": description: Conflict - The target tag already associated with the specified metadata object content: @@ -272,7 +290,7 @@ paths: "400": $ref: "./openapi.yaml#/components/responses/BadRequestErrorResponse" "404": - description: Not Found - The specified metadata object does not exist or the specified tag is not associated with the specified metadata object + description: Not Found - The specified metalake does not exist or the specified tag is not associated with the specified metadata object content: application/vnd.gravitino.v1+json: schema: @@ -280,6 +298,8 @@ paths: examples: NoSuchTagException: $ref: "#/components/examples/NoSuchTagException" + NoSuchMetalakeException: + $ref: "./metalakes.yaml#/components/examples/NoSuchMetalakeException" "5xx": $ref: "./openapi.yaml#/components/responses/ServerErrorResponse" diff --git a/docs/overview.md b/docs/overview.md index 2b215412ede..17d0ee48e30 100644 --- a/docs/overview.md +++ b/docs/overview.md @@ -37,7 +37,7 @@ For example, relational metadata models for tabular data, like Hive, MySQL, Post File metadata model for all the unstructured data, like HDFS, S3, and others. Besides the unified metadata models, Gravitino also provides a unified metadata governance layer -(WIP) to manage the metadata in a unified way, including access control, auditing, discovery and +to manage the metadata in a unified way, including access control, auditing, discovery and others. ### Direct metadata management @@ -63,24 +63,28 @@ change the existing SQL dialects. In the meantime, other query engine support is on the roadmap, including [Apache Spark](https://spark.apache.org/), [Apache Flink](https://flink.apache.org/) and others. -### AI asset management (WIP) +### AI asset management -The goal of Gravitino is to unify the data management in both data and AI assets. The support of AI -assets like models, features, and others are under development. 
+The goal of Gravitino is to unify the data management in both data and AI assets, including raw files, models, etc. ## Terminology -### The model of Apache Gravitino +### The metadata object of Apache Gravitino -![Gravitino Model](assets/metadata-model.png) - -* **Metalake**: The top-level container for metadata. Typically, one group has one metalake - to manage all the metadata in it. Each metalake exposes a three-level namespace(catalog.schema. +* **Metalake**: The container/tenant for metadata. Typically, one group has one metalake + to manage all the metadata in it. Each metalake exposes a three-level namespace (catalog.schema. table) to organize the data. * **Catalog**: A catalog is a collection of metadata from a specific metadata source. Each catalog has a related connector to connect to the specific metadata source. -* **Schema**: A schema is equivalent to a database, Schemas only exist in the specific catalogs - that support relational metadata sources, such as Apache Hive, MySQL, PostgreSQL, and others. +* **Schema**: Schema is the second level namespace to group a collection of metadata, schema can + refer to the database/schema in the relational metadata sources, such as Apache Hive, MySQL, + PostgreSQL, and others. Schema can also refer to the logic namespace for the fileset and model + catalog. * **Table**: The lowest level in the object hierarchy for catalogs that support relational metadata sources. You can create Tables in specific schemas in the catalogs. -* **Model**: The model represents the metadata in the specific catalogs that support model management. +* **Fileset**: The fileset metadata object refers to a collection of files and directories in + the file system. The fileset metadata object is used to manage the logic metadata for the files. +* **Model**: The model metadata object represents the metadata in the specific catalogs that + support model management. 
+* **Topic**: The topic metadata object represents the metadata in the specific catalogs that + support managing the topic for a message queue system, such as Kafka. diff --git a/docs/security/credential-vending.md b/docs/security/credential-vending.md new file mode 100644 index 00000000000..92370f4315d --- /dev/null +++ b/docs/security/credential-vending.md @@ -0,0 +1,178 @@ +--- +title: "Gravitino credential vending" +slug: /security/credential-vending +keyword: security credential vending +license: "This software is licensed under the Apache License version 2." +--- + +## Background + +Gravitino credential vending is used to generate temporary or static credentials for accessing data. With credential vending, Gravitino provides a unified way to control the access to diverse data sources in different platforms. + +### Capabilities + +- Supports Gravitino Iceberg REST server. +- Supports Gravitino server, only supports Hadoop catalog. +- Supports pluggable credentials with built-in credentials: + - S3: `S3TokenCredential`, `S3SecretKeyCredential` + - GCS: `GCSTokenCredential` + - ADLS: `ADLSTokenCredential`, `AzureAccountKeyCredential` + - OSS: `OSSTokenCredential`, `OSSSecretKeyCredential` +- No support for Spark/Trino/Flink connector yet. + +## General configurations + +| Gravitino server catalog properties | Gravitino Iceberg REST server configurations | Description | Default value | Required | Since Version | +|-------------------------------------|--------------------------------------------------------|--------------------------------------------------------------------------------------------|---------------|----------|------------------| +| `credential-provider-type` | `gravitino.iceberg-rest.credential-provider-type` | Deprecated, please use `credential-providers` instead. | (none) | Yes | 0.7.0-incubating | +| `credential-providers` | `gravitino.iceberg-rest.credential-providers` | The credential provider types, separated by comma.
| (none) | Yes | 0.8.0-incubating | +| `credential-cache-expire-ratio` | `gravitino.iceberg-rest.credential-cache-expire-ratio` | Ratio of the credential's expiration time when Gravitino removes the credential from the cache. | 0.15 | No | 0.8.0-incubating | +| `credential-cache-max-size` | `gravitino.iceberg-rest.cache-max-size` | Max size for the credential cache. | 10000 | No | 0.8.0-incubating | + +## Built-in credentials configurations + +### S3 credentials + +#### S3 secret key credential + +A credential with static S3 access key ID and secret access key. + +| Gravitino server catalog properties | Gravitino Iceberg REST server configurations | Description | Default value | Required | Since Version | +|-------------------------------------|---------------------------------------------------|--------------------------------------------------------|---------------|----------|------------------| +| `credential-providers` | `gravitino.iceberg-rest.credential-providers` | `s3-secret-key` for S3 secret key credential provider. | (none) | Yes | 0.8.0-incubating | +| `s3-access-key-id` | `gravitino.iceberg-rest.s3-access-key-id` | The static access key ID used to access S3 data. | (none) | Yes | 0.6.0-incubating | +| `s3-secret-access-key` | `gravitino.iceberg-rest.s3-secret-access-key` | The static secret access key used to access S3 data. | (none) | Yes | 0.6.0-incubating | + +#### S3 token credential + +An S3 token is a token credential with scoped privileges, by leveraging STS [Assume Role](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html). To use an S3 token credential, you should create a role and grant it proper privileges.
+ +| Gravitino server catalog properties | Gravitino Iceberg REST server configurations | Description | Default value | Required | Since Version | +|-------------------------------------|----------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|----------|------------------| +| `credential-providers` | `gravitino.iceberg-rest.credential-providers` | `s3-token` for S3 token credential provider. | (none) | Yes | 0.8.0-incubating | +| `s3-access-key-id` | `gravitino.iceberg-rest.s3-access-key-id` | The static access key ID used to access S3 data. | (none) | Yes | 0.6.0-incubating | +| `s3-secret-access-key` | `gravitino.iceberg-rest.s3-secret-access-key` | The static secret access key used to access S3 data. | (none) | Yes | 0.6.0-incubating | +| `s3-role-arn` | `gravitino.iceberg-rest.s3-role-arn` | The ARN of the role to access the S3 data. | (none) | Yes | 0.7.0-incubating | +| `s3-external-id` | `gravitino.iceberg-rest.s3-external-id` | The S3 external id to generate token. | (none) | No | 0.7.0-incubating | +| `s3-token-expire-in-secs` | `gravitino.iceberg-rest.s3-token-expire-in-secs` | The S3 session token expire time in secs, it couldn't exceed the max session time of the assumed role. | 3600 | No | 0.7.0-incubating | +| `s3-token-service-endpoint` | `gravitino.iceberg-rest.s3-token-service-endpoint` | An alternative endpoint of the S3 token service, This could be used with s3-compatible object storage service like MINIO that has a different STS endpoint. | (none) | No | 0.8.0-incubating | + +### OSS credentials + +#### OSS secret key credential + +A credential with static OSS access key id and secret access key. 
+ +| Gravitino server catalog properties | Gravitino Iceberg REST server configurations | Description | Default value | Required | Since Version | +|-------------------------------------|---------------------------------------------------|-------------------------------------------------------------------------------|---------------|----------|------------------| +| `credential-providers` | `gravitino.iceberg-rest.credential-providers` | `oss-secret-key` for OSS secret credential. | (none) | Yes | 0.8.0-incubating | +| `oss-access-key-id` | `gravitino.iceberg-rest.oss-access-key-id` | The static access key ID used to access OSS data. | (none) | Yes | 0.7.0-incubating | +| `oss-secret-access-key` | `gravitino.iceberg-rest.oss-secret-access-key` | The static secret access key used to access OSS data. | (none) | Yes | 0.7.0-incubating | + +#### OSS token credential + +An OSS token is a token credential with scoped privileges, by leveraging STS [Assume Role](https://www.alibabacloud.com/help/en/oss/developer-reference/use-temporary-access-credentials-provided-by-sts-to-access-oss). To use an OSS token credential, you should create a role and grant it proper privileges. + +| Gravitino server catalog properties | Gravitino Iceberg REST server configurations | Description | Default value | Required | Since Version | +|-------------------------------------|---------------------------------------------------|-------------------------------------------------------------------------------|---------------|----------|------------------| +| `credential-providers` | `gravitino.iceberg-rest.credential-providers` | `oss-token` for s3 token credential. | (none) | Yes | 0.8.0-incubating | +| `oss-access-key-id` | `gravitino.iceberg-rest.oss-access-key-id` | The static access key ID used to access OSS data. | (none) | Yes | 0.7.0-incubating | +| `oss-secret-access-key` | `gravitino.iceberg-rest.oss-secret-access-key` | The static secret access key used to access OSS data. 
| (none) | Yes | 0.7.0-incubating | +| `oss-role-arn` | `gravitino.iceberg-rest.oss-role-arn` | The ARN of the role to access the OSS data. | (none) | Yes | 0.8.0-incubating | +| `oss-external-id` | `gravitino.iceberg-rest.oss-external-id` | The OSS external id to generate token. | (none) | No | 0.8.0-incubating | +| `oss-token-expire-in-secs` | `gravitino.iceberg-rest.oss-token-expire-in-secs` | The OSS security token expire time in secs. | 3600 | No | 0.8.0-incubating | + +### ADLS credentials + +#### Azure account key credential + +A credential with static Azure storage account name and key. + +| Gravitino server catalog properties | Gravitino Iceberg REST server configurations | Description | Default value | Required | Since Version | +|-------------------------------------|-----------------------------------------------------|-----------------------------------------------------------|---------------|----------|------------------| +| `credential-providers` | `gravitino.iceberg-rest.credential-providers` | `azure-account-key` for Azure account key credential. | (none) | Yes | 0.8.0-incubating | +| `azure-storage-account-name` | `gravitino.iceberg-rest.azure-storage-account-name` | The static storage account name used to access ADLS data. | (none) | Yes | 0.8.0-incubating | +| `azure-storage-account-key` | `gravitino.iceberg-rest.azure-storage-account-key` | The static storage account key used to access ADLS data. | (none) | Yes | 0.8.0-incubating | + +#### ADLS token credential + +An ADLS token is a token credential with scoped privileges, by leveraging Azure [User Delegation Sas](https://learn.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas). To use an ADLS token credential, you should create a Microsoft Entra ID service principal and grant it proper privileges. 
+ +| Gravitino server catalog properties | Gravitino Iceberg REST server configurations | Description | Default value | Required | Since Version | +|-------------------------------------|-----------------------------------------------------|---------------------------------------------------------------------|---------------|----------|------------------| +| `credential-providers` | `gravitino.iceberg-rest.credential-providers` | `adls-token` for ADLS token credential. | (none) | Yes | 0.8.0-incubating | +| `azure-storage-account-name` | `gravitino.iceberg-rest.azure-storage-account-name` | The static storage account name used to access ADLS data. | (none) | Yes | 0.8.0-incubating | +| `azure-storage-account-key` | `gravitino.iceberg-rest.azure-storage-account-key` | The static storage account key used to access ADLS data. | (none) | Yes | 0.8.0-incubating | +| `azure-tenant-id` | `gravitino.iceberg-rest.azure-tenant-id` | Azure Active Directory (AAD) tenant ID. | (none) | Yes | 0.8.0-incubating | +| `azure-client-id` | `gravitino.iceberg-rest.azure-client-id` | Azure Active Directory (AAD) client ID used for authentication. | (none) | Yes | 0.8.0-incubating | +| `azure-client-secret` | `gravitino.iceberg-rest.azure-client-secret` | Azure Active Directory (AAD) client secret used for authentication. | (none) | Yes | 0.8.0-incubating | +| `adls-token-expire-in-secs` | `gravitino.iceberg-rest.adls-token-expire-in-secs` | The ADLS SAS token expire time in secs. | 3600 | No | 0.8.0-incubating | + +### GCS credentials + +#### GCS token credential + +An GCS token is a token credential with scoped privileges, by leveraging GCS [Credential Access Boundaries](https://cloud.google.com/iam/docs/downscoping-short-lived-credentials). To use an GCS token credential, you should create an GCS service account and grant it proper privileges. 
+ +| Gravitino server catalog properties | Gravitino Iceberg REST server configurations | Description | Default value | Required | Since Version | +|-------------------------------------|---------------------------------------------------|------------------------------------------------------------|-------------------------------------|----------|------------------| +| `credential-providers` | `gravitino.iceberg-rest.credential-providers` | `gcs-token` for GCS token credential. | (none) | Yes | 0.8.0-incubating | +| `gcs-credential-file-path` | `gravitino.iceberg-rest.gcs-credential-file-path` | Deprecated, please use `gcs-service-account-file` instead. | GCS Application default credential. | No | 0.7.0-incubating | +| `gcs-service-account-file` | `gravitino.iceberg-rest.gcs-service-account-file` | The location of GCS credential file. | GCS Application default credential. | No | 0.8.0-incubating | + +:::note +For Gravitino Iceberg REST server, please ensure that the credential file can be accessed by the server. For example, if the server is running on a GCE machine, or you can set the environment variable as `export GOOGLE_APPLICATION_CREDENTIALS=/xx/application_default_credentials.json`, even when the `gcs-service-account-file` has already been configured. +::: + +## Custom credentials + +Gravitino supports custom credentials, you can implement the `org.apache.gravitino.credential.CredentialProvider` interface to support custom credentials, and place the corresponding jar to the classpath of Iceberg catalog server or Hadoop catalog. + +## Deployment + +Besides setting credentials related configuration, please download Gravitino cloud bundle jar and place it in the classpath of Iceberg REST server or Hadoop catalog. 
+ +Gravitino cloud bundle jar: + +- [Gravitino AWS bundle jar](https://mvnrepository.com/artifact/org.apache.gravitino/gravitino-aws-bundle) +- [Gravitino Aliyun bundle jar](https://mvnrepository.com/artifact/org.apache.gravitino/gravitino-aliyun-bundle) +- [Gravitino GCP bundle jar](https://mvnrepository.com/artifact/org.apache.gravitino/gravitino-gcp-bundle) +- [Gravitino Azure bundle jar](https://mvnrepository.com/artifact/org.apache.gravitino/gravitino-azure-bundle) + +The classpath of the server: + +- Iceberg REST server: the classpath differs in different deploy mode, please refer to [Server management](../iceberg-rest-service.md#server-management) part. +- Hadoop catalog: `catalogs/hadoop/libs/` + +## Usage example + +### Credential vending for Iceberg REST server + +Suppose the Iceberg table data is stored in S3, follow the steps below: + +1. Download the [Gravitino AWS bundle jar](https://mvnrepository.com/artifact/org.apache.gravitino/gravitino-aws-bundle), and place it to the classpath of Iceberg REST server. + +2. Add s3 token credential configurations. + +``` +gravitino.iceberg-rest.warehouse = s3://{bucket_name}/{warehouse_path} +gravitino.iceberg-rest.io-impl= org.apache.iceberg.aws.s3.S3FileIO +gravitino.iceberg-rest.credential-providers = s3-token +gravitino.iceberg-rest.s3-access-key-id = xxx +gravitino.iceberg-rest.s3-secret-access-key = xxx +gravitino.iceberg-rest.s3-region = {region_name} +gravitino.iceberg-rest.s3-role-arn = {role_arn} +``` + +3. Exploring the Iceberg table with Spark client with credential vending enabled. 
+ +```shell +./bin/spark-sql -v \ +--packages org.apache.iceberg:iceberg-spark-runtime-3.4_2.12:1.3.1 \ +--conf spark.jars={path}/iceberg-aws-bundle-1.5.2.jar \ +--conf spark.sql.extensions=org.apache.iceberg.spark.extensions.IcebergSparkSessionExtensions \ +--conf spark.sql.catalog.rest=org.apache.iceberg.spark.SparkCatalog \ +--conf spark.sql.catalog.rest.type=rest \ +--conf spark.sql.catalog.rest.uri=http://127.0.0.1:9001/iceberg/ \ +--conf spark.sql.catalog.rest.header.X-Iceberg-Access-Delegation=vended-credentials +``` diff --git a/server/src/main/java/org/apache/gravitino/server/web/rest/GroupOperations.java b/server/src/main/java/org/apache/gravitino/server/web/rest/GroupOperations.java index 12cf769932e..95db0ca67f3 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/rest/GroupOperations.java +++ b/server/src/main/java/org/apache/gravitino/server/web/rest/GroupOperations.java @@ -103,11 +103,13 @@ public Response addGroup(@PathParam("metalake") String metalake, GroupAddRequest TreeLockUtils.doWithTreeLock( NameIdentifier.of(AuthorizationUtils.ofGroupNamespace(metalake).levels()), LockType.WRITE, - () -> - Utils.ok( - new GroupResponse( - DTOConverters.toDTO( - accessControlManager.addGroup(metalake, request.getName())))))); + () -> { + request.validate(); + return Utils.ok( + new GroupResponse( + DTOConverters.toDTO( + accessControlManager.addGroup(metalake, request.getName())))); + })); } catch (Exception e) { return ExceptionHandlers.handleGroupException( OperationType.ADD, request.getName(), metalake, e); diff --git a/server/src/main/java/org/apache/gravitino/server/web/rest/ModelOperations.java b/server/src/main/java/org/apache/gravitino/server/web/rest/ModelOperations.java index fd507821086..e4b80d0526e 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/rest/ModelOperations.java +++ b/server/src/main/java/org/apache/gravitino/server/web/rest/ModelOperations.java @@ -286,7 +286,7 @@ public Response getModelVersionByAlias( } 
@POST - @Path("{model}") + @Path("{model}/versions") @Produces("application/vnd.gravitino.v1+json") @Timed(name = "link-model-version." + MetricNames.HTTP_PROCESS_DURATION, absolute = true) @ResponseMetered(name = "link-model-version", absolute = true) diff --git a/server/src/main/java/org/apache/gravitino/server/web/rest/OwnerOperations.java b/server/src/main/java/org/apache/gravitino/server/web/rest/OwnerOperations.java index ea5684b55f9..7dcfcfd0674 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/rest/OwnerOperations.java +++ b/server/src/main/java/org/apache/gravitino/server/web/rest/OwnerOperations.java @@ -113,6 +113,7 @@ public Response setOwnerForObject( return Utils.doAs( httpRequest, () -> { + request.validate(); MetadataObjectUtil.checkMetadataObject(metalake, object); NameIdentifier objectIdent = MetadataObjectUtil.toEntityIdent(metalake, object); TreeLockUtils.doWithTreeLock( diff --git a/server/src/main/java/org/apache/gravitino/server/web/rest/PermissionOperations.java b/server/src/main/java/org/apache/gravitino/server/web/rest/PermissionOperations.java index 38fcd7380e6..3ce1517a46a 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/rest/PermissionOperations.java +++ b/server/src/main/java/org/apache/gravitino/server/web/rest/PermissionOperations.java @@ -87,12 +87,14 @@ public Response grantRolesToUser( TreeLockUtils.doWithTreeLock( NameIdentifier.of(AuthorizationUtils.ofRoleNamespace(metalake).levels()), LockType.READ, - () -> - Utils.ok( - new UserResponse( - DTOConverters.toDTO( - accessControlManager.grantRolesToUser( - metalake, request.getRoleNames(), user))))))); + () -> { + request.validate(); + return Utils.ok( + new UserResponse( + DTOConverters.toDTO( + accessControlManager.grantRolesToUser( + metalake, request.getRoleNames(), user)))); + }))); } catch (Exception e) { return ExceptionHandlers.handleUserPermissionOperationException( OperationType.GRANT, StringUtils.join(request.getRoleNames(), ","), user, 
e); @@ -119,12 +121,14 @@ public Response grantRolesToGroup( TreeLockUtils.doWithTreeLock( NameIdentifier.of(AuthorizationUtils.ofRoleNamespace(metalake).levels()), LockType.READ, - () -> - Utils.ok( - new GroupResponse( - DTOConverters.toDTO( - accessControlManager.grantRolesToGroup( - metalake, request.getRoleNames(), group))))))); + () -> { + request.validate(); + return Utils.ok( + new GroupResponse( + DTOConverters.toDTO( + accessControlManager.grantRolesToGroup( + metalake, request.getRoleNames(), group)))); + }))); } catch (Exception e) { return ExceptionHandlers.handleGroupPermissionOperationException( OperationType.GRANT, StringUtils.join(request.getRoleNames(), ","), group, e); @@ -151,12 +155,14 @@ public Response revokeRolesFromUser( TreeLockUtils.doWithTreeLock( NameIdentifier.of(AuthorizationUtils.ofRoleNamespace(metalake).levels()), LockType.READ, - () -> - Utils.ok( - new UserResponse( - DTOConverters.toDTO( - accessControlManager.revokeRolesFromUser( - metalake, request.getRoleNames(), user))))))); + () -> { + request.validate(); + return Utils.ok( + new UserResponse( + DTOConverters.toDTO( + accessControlManager.revokeRolesFromUser( + metalake, request.getRoleNames(), user)))); + }))); } catch (Exception e) { return ExceptionHandlers.handleUserPermissionOperationException( OperationType.REVOKE, StringUtils.join(request.getRoleNames(), ","), user, e); @@ -183,12 +189,14 @@ public Response revokeRolesFromGroup( TreeLockUtils.doWithTreeLock( NameIdentifier.of(AuthorizationUtils.ofRoleNamespace(metalake).levels()), LockType.READ, - () -> - Utils.ok( - new GroupResponse( - DTOConverters.toDTO( - accessControlManager.revokeRolesFromGroup( - metalake, request.getRoleNames(), group))))))); + () -> { + request.validate(); + return Utils.ok( + new GroupResponse( + DTOConverters.toDTO( + accessControlManager.revokeRolesFromGroup( + metalake, request.getRoleNames(), group)))); + }))); } catch (Exception e) { return 
ExceptionHandlers.handleGroupPermissionOperationException( OperationType.REVOKE, StringUtils.join(request.getRoleNames()), group, e); @@ -214,6 +222,8 @@ public Response grantPrivilegeToRole( return Utils.doAs( httpRequest, () -> { + privilegeGrantRequest.validate(); + for (PrivilegeDTO privilegeDTO : privilegeGrantRequest.getPrivileges()) { AuthorizationUtils.checkPrivilege(privilegeDTO, object, metalake); } @@ -259,6 +269,8 @@ public Response revokePrivilegeFromRole( return Utils.doAs( httpRequest, () -> { + privilegeRevokeRequest.validate(); + for (PrivilegeDTO privilegeDTO : privilegeRevokeRequest.getPrivileges()) { AuthorizationUtils.checkPrivilege(privilegeDTO, object, metalake); } diff --git a/server/src/main/java/org/apache/gravitino/server/web/rest/RoleOperations.java b/server/src/main/java/org/apache/gravitino/server/web/rest/RoleOperations.java index e986753d0ce..9690afe13f1 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/rest/RoleOperations.java +++ b/server/src/main/java/org/apache/gravitino/server/web/rest/RoleOperations.java @@ -127,6 +127,7 @@ public Response createRole(@PathParam("metalake") String metalake, RoleCreateReq return Utils.doAs( httpRequest, () -> { + request.validate(); Set metadataObjects = Sets.newHashSet(); for (SecurableObjectDTO object : request.getSecurableObjects()) { MetadataObject metadataObject = @@ -142,10 +143,10 @@ public Response createRole(@PathParam("metalake") String metalake, RoleCreateReq Set privileges = Sets.newHashSet(object.privileges()); AuthorizationUtils.checkDuplicatedNamePrivilege(privileges); - for (Privilege privilege : object.privileges()) { - AuthorizationUtils.checkPrivilege((PrivilegeDTO) privilege, object, metalake); - } try { + for (Privilege privilege : object.privileges()) { + AuthorizationUtils.checkPrivilege((PrivilegeDTO) privilege, object, metalake); + } MetadataObjectUtil.checkMetadataObject(metalake, object); } catch (NoSuchMetadataObjectException nsm) { throw new 
IllegalMetadataObjectException(nsm); diff --git a/server/src/main/java/org/apache/gravitino/server/web/rest/SchemaOperations.java b/server/src/main/java/org/apache/gravitino/server/web/rest/SchemaOperations.java index 8093da7ef79..55341627b91 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/rest/SchemaOperations.java +++ b/server/src/main/java/org/apache/gravitino/server/web/rest/SchemaOperations.java @@ -210,11 +210,7 @@ public Response dropSchema( httpRequest, () -> { NameIdentifier ident = NameIdentifierUtil.ofSchema(metalake, catalog, schema); - boolean dropped = - TreeLockUtils.doWithTreeLock( - NameIdentifierUtil.ofCatalog(metalake, catalog), - LockType.WRITE, - () -> dispatcher.dropSchema(ident, cascade)); + boolean dropped = dispatcher.dropSchema(ident, cascade); if (!dropped) { LOG.warn("Fail to drop schema {} under namespace {}", schema, ident.namespace()); } diff --git a/server/src/main/java/org/apache/gravitino/server/web/rest/TableOperations.java b/server/src/main/java/org/apache/gravitino/server/web/rest/TableOperations.java index d5cf1ffc7be..3d9d863e985 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/rest/TableOperations.java +++ b/server/src/main/java/org/apache/gravitino/server/web/rest/TableOperations.java @@ -228,11 +228,7 @@ public Response dropTable( httpRequest, () -> { NameIdentifier ident = NameIdentifierUtil.ofTable(metalake, catalog, schema, table); - boolean dropped = - TreeLockUtils.doWithTreeLock( - NameIdentifier.of(metalake, catalog, schema), - LockType.WRITE, - () -> purge ? dispatcher.purgeTable(ident) : dispatcher.dropTable(ident)); + boolean dropped = purge ? 
dispatcher.purgeTable(ident) : dispatcher.dropTable(ident); if (!dropped) { LOG.warn("Failed to drop table {} under schema {}", table, schema); } diff --git a/server/src/main/java/org/apache/gravitino/server/web/rest/UserOperations.java b/server/src/main/java/org/apache/gravitino/server/web/rest/UserOperations.java index 24f34d652ab..518178cd325 100644 --- a/server/src/main/java/org/apache/gravitino/server/web/rest/UserOperations.java +++ b/server/src/main/java/org/apache/gravitino/server/web/rest/UserOperations.java @@ -129,11 +129,13 @@ public Response addUser(@PathParam("metalake") String metalake, UserAddRequest r TreeLockUtils.doWithTreeLock( NameIdentifier.of(AuthorizationUtils.ofGroupNamespace(metalake).levels()), LockType.WRITE, - () -> - Utils.ok( - new UserResponse( - DTOConverters.toDTO( - accessControlManager.addUser(metalake, request.getName())))))); + () -> { + request.validate(); + return Utils.ok( + new UserResponse( + DTOConverters.toDTO( + accessControlManager.addUser(metalake, request.getName())))); + })); } catch (Exception e) { return ExceptionHandlers.handleUserException( OperationType.ADD, request.getName(), metalake, e); diff --git a/server/src/test/java/org/apache/gravitino/server/web/rest/TestGroupOperations.java b/server/src/test/java/org/apache/gravitino/server/web/rest/TestGroupOperations.java index 77f0cf97988..ac4f8c66a8d 100644 --- a/server/src/test/java/org/apache/gravitino/server/web/rest/TestGroupOperations.java +++ b/server/src/test/java/org/apache/gravitino/server/web/rest/TestGroupOperations.java @@ -116,6 +116,15 @@ public void testAddGroup() { when(manager.addGroup(any(), any())).thenReturn(group); + // test with IllegalRequest + GroupAddRequest illegalReq = new GroupAddRequest(""); + Response illegalResp = + target("/metalakes/metalake1/groups") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .post(Entity.entity(illegalReq, MediaType.APPLICATION_JSON_TYPE)); + 
Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), illegalResp.getStatus()); + Response resp = target("/metalakes/metalake1/groups") .request(MediaType.APPLICATION_JSON_TYPE) diff --git a/server/src/test/java/org/apache/gravitino/server/web/rest/TestMetadataObjectCredentialOperations.java b/server/src/test/java/org/apache/gravitino/server/web/rest/TestMetadataObjectCredentialOperations.java index 464ccd86984..ce759fac65f 100644 --- a/server/src/test/java/org/apache/gravitino/server/web/rest/TestMetadataObjectCredentialOperations.java +++ b/server/src/test/java/org/apache/gravitino/server/web/rest/TestMetadataObjectCredentialOperations.java @@ -19,7 +19,6 @@ package org.apache.gravitino.server.web.rest; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -35,10 +34,7 @@ import org.apache.gravitino.credential.CredentialOperationDispatcher; import org.apache.gravitino.credential.S3SecretKeyCredential; import org.apache.gravitino.dto.responses.CredentialResponse; -import org.apache.gravitino.dto.responses.ErrorConstants; -import org.apache.gravitino.dto.responses.ErrorResponse; import org.apache.gravitino.dto.util.DTOConverters; -import org.apache.gravitino.exceptions.NoSuchCredentialException; import org.apache.gravitino.rest.RESTUtils; import org.glassfish.jersey.internal.inject.AbstractBinder; import org.glassfish.jersey.server.ResourceConfig; @@ -138,25 +134,6 @@ private void testGetCredentialsForObject(MetadataObject metadataObject) { credentialResponse = response.readEntity(CredentialResponse.class); Assertions.assertEquals(0, credentialResponse.getCode()); Assertions.assertEquals(0, credentialResponse.getCredentials().length); - - // Test throws NoSuchCredentialException - doThrow(new NoSuchCredentialException("mock error")) - .when(credentialOperationDispatcher) - .getCredentials(any()); - response = - 
target(basePath(metalake)) - .path(metadataObject.type().toString()) - .path(metadataObject.fullName()) - .path("/credentials") - .request(MediaType.APPLICATION_JSON_TYPE) - .accept("application/vnd.gravitino.v1+json") - .get(); - - Assertions.assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); - ErrorResponse errorResponse = response.readEntity(ErrorResponse.class); - Assertions.assertEquals(ErrorConstants.NOT_FOUND_CODE, errorResponse.getCode()); - Assertions.assertEquals( - NoSuchCredentialException.class.getSimpleName(), errorResponse.getType()); } private String basePath(String metalake) { diff --git a/server/src/test/java/org/apache/gravitino/server/web/rest/TestModelOperations.java b/server/src/test/java/org/apache/gravitino/server/web/rest/TestModelOperations.java index 42e48d0302f..c383a07a463 100644 --- a/server/src/test/java/org/apache/gravitino/server/web/rest/TestModelOperations.java +++ b/server/src/test/java/org/apache/gravitino/server/web/rest/TestModelOperations.java @@ -601,6 +601,7 @@ public void testLinkModelVersion() { Response resp = target(modelPath()) .path("model1") + .path("versions") .request(MediaType.APPLICATION_JSON_TYPE) .accept("application/vnd.gravitino.v1+json") .post(Entity.entity(req, MediaType.APPLICATION_JSON_TYPE)); @@ -619,6 +620,7 @@ public void testLinkModelVersion() { Response resp1 = target(modelPath()) .path("model1") + .path("versions") .request(MediaType.APPLICATION_JSON_TYPE) .accept("application/vnd.gravitino.v1+json") .post(Entity.entity(req, MediaType.APPLICATION_JSON_TYPE)); @@ -637,6 +639,7 @@ public void testLinkModelVersion() { Response resp2 = target(modelPath()) .path("model1") + .path("versions") .request(MediaType.APPLICATION_JSON_TYPE) .accept("application/vnd.gravitino.v1+json") .post(Entity.entity(req, MediaType.APPLICATION_JSON_TYPE)); @@ -656,6 +659,7 @@ public void testLinkModelVersion() { Response resp3 = target(modelPath()) .path("model1") + .path("versions") 
.request(MediaType.APPLICATION_JSON_TYPE) .accept("application/vnd.gravitino.v1+json") .post(Entity.entity(req, MediaType.APPLICATION_JSON_TYPE)); diff --git a/server/src/test/java/org/apache/gravitino/server/web/rest/TestOwnerOperations.java b/server/src/test/java/org/apache/gravitino/server/web/rest/TestOwnerOperations.java index 0643ed9bf1a..dc7451a538c 100644 --- a/server/src/test/java/org/apache/gravitino/server/web/rest/TestOwnerOperations.java +++ b/server/src/test/java/org/apache/gravitino/server/web/rest/TestOwnerOperations.java @@ -202,6 +202,14 @@ public Type type() { @Test void testSetOwnerForObject() { when(metalakeDispatcher.metalakeExists(any())).thenReturn(true); + OwnerSetRequest invalidRequest = new OwnerSetRequest(null, Owner.Type.USER); + Response invalidResp = + target("/metalakes/metalake1/owners/metalake/metalake1") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .put(Entity.entity(invalidRequest, MediaType.APPLICATION_JSON_TYPE)); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), invalidResp.getStatus()); + OwnerSetRequest request = new OwnerSetRequest("test", Owner.Type.USER); Response resp = target("/metalakes/metalake1/owners/metalake/metalake1") diff --git a/server/src/test/java/org/apache/gravitino/server/web/rest/TestPermissionOperations.java b/server/src/test/java/org/apache/gravitino/server/web/rest/TestPermissionOperations.java index 8876e9035f4..1f507cbbcc1 100644 --- a/server/src/test/java/org/apache/gravitino/server/web/rest/TestPermissionOperations.java +++ b/server/src/test/java/org/apache/gravitino/server/web/rest/TestPermissionOperations.java @@ -135,8 +135,15 @@ public void testGrantRolesToUser() { .build(); when(manager.grantRolesToUser(any(), any(), any())).thenReturn(userEntity); - RoleGrantRequest request = new RoleGrantRequest(Lists.newArrayList("role1")); + RoleGrantRequest illegalReq = new RoleGrantRequest(null); + Response illegalResp = + 
target("/metalakes/metalake1/permissions/users/user/grant") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .put(Entity.entity(illegalReq, MediaType.APPLICATION_JSON_TYPE)); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), illegalResp.getStatus()); + RoleGrantRequest request = new RoleGrantRequest(Lists.newArrayList("role1")); Response resp = target("/metalakes/metalake1/permissions/users/user/grant") .request(MediaType.APPLICATION_JSON_TYPE) @@ -232,6 +239,15 @@ public void testGrantRolesToGroup() { .build(); when(manager.grantRolesToGroup(any(), any(), any())).thenReturn(groupEntity); + // Test with Illegal request + RoleGrantRequest illegalReq = new RoleGrantRequest(null); + Response illegalResp = + target("/metalakes/metalake1/permissions/groups/group/grant") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .put(Entity.entity(illegalReq, MediaType.APPLICATION_JSON_TYPE)); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), illegalResp.getStatus()); + RoleGrantRequest request = new RoleGrantRequest(Lists.newArrayList("role1")); Response resp = @@ -331,6 +347,16 @@ public void testRevokeRolesFromUser() { AuditInfo.builder().withCreator("test").withCreateTime(Instant.now()).build()) .build(); when(manager.revokeRolesFromUser(any(), any(), any())).thenReturn(userEntity); + + // Test with illegal request + RoleRevokeRequest illegalReq = new RoleRevokeRequest(null); + Response illegalResp = + target("/metalakes/metalake1/permissions/users/user1/revoke") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .put(Entity.entity(illegalReq, MediaType.APPLICATION_JSON_TYPE)); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), illegalResp.getStatus()); + RoleRevokeRequest request = new RoleRevokeRequest(Lists.newArrayList("role1")); Response resp = @@ -393,6 +419,15 @@ public void 
testRevokeRolesFromGroup() { AuditInfo.builder().withCreator("test").withCreateTime(Instant.now()).build()) .build(); when(manager.revokeRolesFromGroup(any(), any(), any())).thenReturn(groupEntity); + // Test with illegal request + RoleRevokeRequest illegalReq = new RoleRevokeRequest(null); + Response illegalResp = + target("/metalakes/metalake1/permissions/groups/group1/revoke") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .put(Entity.entity(illegalReq, MediaType.APPLICATION_JSON_TYPE)); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), illegalResp.getStatus()); + RoleRevokeRequest request = new RoleRevokeRequest(Lists.newArrayList("role1")); Response resp = @@ -538,6 +573,16 @@ public void testRevokePrivilegesFromRole() { .build(); when(manager.revokePrivilegesFromRole(any(), any(), any(), any())).thenReturn(roleEntity); when(metalakeDispatcher.metalakeExists(any())).thenReturn(true); + + // Test with illegal request + PrivilegeRevokeRequest illegalReq = new PrivilegeRevokeRequest(null); + Response illegalResp = + target("/metalakes/metalake1/permissions/roles/role1/metalake/metalake1/revoke") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .put(Entity.entity(illegalReq, MediaType.APPLICATION_JSON_TYPE)); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), illegalResp.getStatus()); + PrivilegeRevokeRequest request = new PrivilegeRevokeRequest( Lists.newArrayList( diff --git a/server/src/test/java/org/apache/gravitino/server/web/rest/TestRoleOperations.java b/server/src/test/java/org/apache/gravitino/server/web/rest/TestRoleOperations.java index 5a53ec5f9f0..06d9fcc27e9 100644 --- a/server/src/test/java/org/apache/gravitino/server/web/rest/TestRoleOperations.java +++ b/server/src/test/java/org/apache/gravitino/server/web/rest/TestRoleOperations.java @@ -29,6 +29,7 @@ import com.google.common.collect.Lists; import java.io.IOException; 
+import java.lang.reflect.Field; import java.time.Instant; import java.util.Collections; import java.util.concurrent.atomic.AtomicReference; @@ -52,6 +53,7 @@ import org.apache.gravitino.catalog.SchemaDispatcher; import org.apache.gravitino.catalog.TableDispatcher; import org.apache.gravitino.catalog.TopicDispatcher; +import org.apache.gravitino.dto.authorization.PrivilegeDTO; import org.apache.gravitino.dto.authorization.RoleDTO; import org.apache.gravitino.dto.authorization.SecurableObjectDTO; import org.apache.gravitino.dto.requests.RoleCreateRequest; @@ -63,6 +65,7 @@ import org.apache.gravitino.dto.util.DTOConverters; import org.apache.gravitino.exceptions.IllegalNamespaceException; import org.apache.gravitino.exceptions.IllegalPrivilegeException; +import org.apache.gravitino.exceptions.NoSuchCatalogException; import org.apache.gravitino.exceptions.NoSuchMetadataObjectException; import org.apache.gravitino.exceptions.NoSuchMetalakeException; import org.apache.gravitino.exceptions.NoSuchRoleException; @@ -141,7 +144,7 @@ protected void configure() { } @Test - public void testCreateRole() { + public void testCreateRole() throws IllegalAccessException, NoSuchFieldException { SecurableObject securableObject = SecurableObjects.ofCatalog("catalog", Lists.newArrayList(Privileges.UseCatalog.allow())); SecurableObject anotherSecurableObject = @@ -160,6 +163,33 @@ public void testCreateRole() { when(manager.createRole(any(), any(), any(), any())).thenReturn(role); when(catalogDispatcher.catalogExists(any())).thenReturn(true); + // Test with IllegalRequest + RoleCreateRequest illegalRequest = new RoleCreateRequest("role", Collections.emptyMap(), null); + Response illegalResp = + target("/metalakes/metalake1/roles") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .post(Entity.entity(illegalRequest, MediaType.APPLICATION_JSON_TYPE)); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), illegalResp.getStatus()); 
+ + SecurableObjectDTO illegalObject = + DTOConverters.toDTO( + SecurableObjects.ofCatalog( + "illegal_catalog", Lists.newArrayList(Privileges.CreateSchema.deny()))); + Field field = illegalObject.getClass().getDeclaredField("privileges"); + field.setAccessible(true); + field.set(illegalObject, new PrivilegeDTO[] {}); + + illegalRequest = + new RoleCreateRequest( + "role", Collections.emptyMap(), new SecurableObjectDTO[] {illegalObject}); + illegalResp = + target("/metalakes/metalake1/roles") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .post(Entity.entity(illegalRequest, MediaType.APPLICATION_JSON_TYPE)); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), illegalResp.getStatus()); + Response resp = target("/metalakes/metalake1/roles") .request(MediaType.APPLICATION_JSON_TYPE) @@ -199,8 +229,31 @@ public void testCreateRole() { Privileges.UseCatalog.allow().condition(), roleDTO.securableObjects().get(0).privileges().get(0).condition()); + // Test with a wrong metalake name + RoleCreateRequest reqWithWrongMetalake = + new RoleCreateRequest( + "role", + Collections.emptyMap(), + new SecurableObjectDTO[] { + DTOConverters.toDTO( + SecurableObjects.ofMetalake( + "unknown", Lists.newArrayList(Privileges.UseCatalog.allow()))), + }); + Response respWithWrongMetalake = + target("/metalakes/metalake1/roles") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .post(Entity.entity(reqWithWrongMetalake, MediaType.APPLICATION_JSON_TYPE)); + Assertions.assertEquals( + Response.Status.BAD_REQUEST.getStatusCode(), respWithWrongMetalake.getStatus()); + Assertions.assertEquals(MediaType.APPLICATION_JSON_TYPE, respWithWrongMetalake.getMediaType()); + ErrorResponse withWrongMetalakeResponse = respWithWrongMetalake.readEntity(ErrorResponse.class); + Assertions.assertEquals( + ErrorConstants.ILLEGAL_ARGUMENTS_CODE, withWrongMetalakeResponse.getCode()); + // Test to a catalog 
which doesn't exist - when(catalogDispatcher.catalogExists(any())).thenReturn(false); + reset(catalogDispatcher); + when(catalogDispatcher.loadCatalog(any())).thenThrow(new NoSuchCatalogException("mock error")); Response respNotExist = target("/metalakes/metalake1/roles") .request(MediaType.APPLICATION_JSON_TYPE) diff --git a/server/src/test/java/org/apache/gravitino/server/web/rest/TestUserOperations.java b/server/src/test/java/org/apache/gravitino/server/web/rest/TestUserOperations.java index 7f570e779f4..82bc59155ba 100644 --- a/server/src/test/java/org/apache/gravitino/server/web/rest/TestUserOperations.java +++ b/server/src/test/java/org/apache/gravitino/server/web/rest/TestUserOperations.java @@ -115,6 +115,15 @@ public void testAddUser() { when(manager.addUser(any(), any())).thenReturn(user); + // test with IllegalRequest + UserAddRequest illegalReq = new UserAddRequest(""); + Response illegalResp = + target("/metalakes/metalake1/users") + .request(MediaType.APPLICATION_JSON_TYPE) + .accept("application/vnd.gravitino.v1+json") + .post(Entity.entity(illegalReq, MediaType.APPLICATION_JSON_TYPE)); + Assertions.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), illegalResp.getStatus()); + Response resp = target("/metalakes/metalake1/users") .request(MediaType.APPLICATION_JSON_TYPE) diff --git a/web/web/package.json b/web/web/package.json index 471844e2170..29557e07dbd 100644 --- a/web/web/package.json +++ b/web/web/package.json @@ -33,7 +33,7 @@ "clsx": "^2.1.1", "dayjs": "^1.11.11", "lodash-es": "^4.17.21", - "next": "14.2.10", + "next": "14.2.21", "nprogress": "^0.2.0", "qs": "^6.12.2", "react": "^18.3.1", diff --git a/web/web/pnpm-lock.yaml b/web/web/pnpm-lock.yaml index e5c77f19178..0f2bbbf4bc5 100644 --- a/web/web/pnpm-lock.yaml +++ b/web/web/pnpm-lock.yaml @@ -57,8 +57,8 @@ importers: specifier: ^4.17.21 version: 4.17.21 next: - specifier: 14.2.10 - version: 14.2.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + specifier: 14.2.21 + version: 
14.2.21(react-dom@18.3.1(react@18.3.1))(react@18.3.1) nprogress: specifier: ^0.2.0 version: 0.2.0 @@ -672,62 +672,62 @@ packages: '@next/bundle-analyzer@14.2.4': resolution: {integrity: sha512-ydSDikSgGhYmBlnvzS4tgdGyn40SCFI9uWDldbkRSwXS60tg4WBJR4qJoTSERTmdAFb1PeUYCyFdfC80i2WL1w==} - '@next/env@14.2.10': - resolution: {integrity: sha512-dZIu93Bf5LUtluBXIv4woQw2cZVZ2DJTjax5/5DOs3lzEOeKLy7GxRSr4caK9/SCPdaW6bCgpye6+n4Dh9oJPw==} + '@next/env@14.2.21': + resolution: {integrity: sha512-lXcwcJd5oR01tggjWJ6SrNNYFGuOOMB9c251wUNkjCpkoXOPkDeF/15c3mnVlBqrW4JJXb2kVxDFhC4GduJt2A==} '@next/eslint-plugin-next@14.0.3': resolution: {integrity: sha512-j4K0n+DcmQYCVnSAM+UByTVfIHnYQy2ODozfQP+4RdwtRDfobrIvKq1K4Exb2koJ79HSSa7s6B2SA8T/1YR3RA==} - '@next/swc-darwin-arm64@14.2.10': - resolution: {integrity: sha512-V3z10NV+cvMAfxQUMhKgfQnPbjw+Ew3cnr64b0lr8MDiBJs3eLnM6RpGC46nhfMZsiXgQngCJKWGTC/yDcgrDQ==} + '@next/swc-darwin-arm64@14.2.21': + resolution: {integrity: sha512-HwEjcKsXtvszXz5q5Z7wCtrHeTTDSTgAbocz45PHMUjU3fBYInfvhR+ZhavDRUYLonm53aHZbB09QtJVJj8T7g==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - '@next/swc-darwin-x64@14.2.10': - resolution: {integrity: sha512-Y0TC+FXbFUQ2MQgimJ/7Ina2mXIKhE7F+GUe1SgnzRmwFY3hX2z8nyVCxE82I2RicspdkZnSWMn4oTjIKz4uzA==} + '@next/swc-darwin-x64@14.2.21': + resolution: {integrity: sha512-TSAA2ROgNzm4FhKbTbyJOBrsREOMVdDIltZ6aZiKvCi/v0UwFmwigBGeqXDA97TFMpR3LNNpw52CbVelkoQBxA==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@next/swc-linux-arm64-gnu@14.2.10': - resolution: {integrity: sha512-ZfQ7yOy5zyskSj9rFpa0Yd7gkrBnJTkYVSya95hX3zeBG9E55Z6OTNPn1j2BTFWvOVVj65C3T+qsjOyVI9DQpA==} + '@next/swc-linux-arm64-gnu@14.2.21': + resolution: {integrity: sha512-0Dqjn0pEUz3JG+AImpnMMW/m8hRtl1GQCNbO66V1yp6RswSTiKmnHf3pTX6xMdJYSemf3O4Q9ykiL0jymu0TuA==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@next/swc-linux-arm64-musl@14.2.10': - resolution: {integrity: sha512-n2i5o3y2jpBfXFRxDREr342BGIQCJbdAUi/K4q6Env3aSx8erM9VuKXHw5KNROK9ejFSPf0LhoSkU/ZiNdacpQ==} 
+ '@next/swc-linux-arm64-musl@14.2.21': + resolution: {integrity: sha512-Ggfw5qnMXldscVntwnjfaQs5GbBbjioV4B4loP+bjqNEb42fzZlAaK+ldL0jm2CTJga9LynBMhekNfV8W4+HBw==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@next/swc-linux-x64-gnu@14.2.10': - resolution: {integrity: sha512-GXvajAWh2woTT0GKEDlkVhFNxhJS/XdDmrVHrPOA83pLzlGPQnixqxD8u3bBB9oATBKB//5e4vpACnx5Vaxdqg==} + '@next/swc-linux-x64-gnu@14.2.21': + resolution: {integrity: sha512-uokj0lubN1WoSa5KKdThVPRffGyiWlm/vCc/cMkWOQHw69Qt0X1o3b2PyLLx8ANqlefILZh1EdfLRz9gVpG6tg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@next/swc-linux-x64-musl@14.2.10': - resolution: {integrity: sha512-opFFN5B0SnO+HTz4Wq4HaylXGFV+iHrVxd3YvREUX9K+xfc4ePbRrxqOuPOFjtSuiVouwe6uLeDtabjEIbkmDA==} + '@next/swc-linux-x64-musl@14.2.21': + resolution: {integrity: sha512-iAEBPzWNbciah4+0yI4s7Pce6BIoxTQ0AGCkxn/UBuzJFkYyJt71MadYQkjPqCQCJAFQ26sYh7MOKdU+VQFgPg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@next/swc-win32-arm64-msvc@14.2.10': - resolution: {integrity: sha512-9NUzZuR8WiXTvv+EiU/MXdcQ1XUvFixbLIMNQiVHuzs7ZIFrJDLJDaOF1KaqttoTujpcxljM/RNAOmw1GhPPQQ==} + '@next/swc-win32-arm64-msvc@14.2.21': + resolution: {integrity: sha512-plykgB3vL2hB4Z32W3ktsfqyuyGAPxqwiyrAi2Mr8LlEUhNn9VgkiAl5hODSBpzIfWweX3er1f5uNpGDygfQVQ==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@next/swc-win32-ia32-msvc@14.2.10': - resolution: {integrity: sha512-fr3aEbSd1GeW3YUMBkWAu4hcdjZ6g4NBl1uku4gAn661tcxd1bHs1THWYzdsbTRLcCKLjrDZlNp6j2HTfrw+Bg==} + '@next/swc-win32-ia32-msvc@14.2.21': + resolution: {integrity: sha512-w5bacz4Vxqrh06BjWgua3Yf7EMDb8iMcVhNrNx8KnJXt8t+Uu0Zg4JHLDL/T7DkTCEEfKXO/Er1fcfWxn2xfPA==} engines: {node: '>= 10'} cpu: [ia32] os: [win32] - '@next/swc-win32-x64-msvc@14.2.10': - resolution: {integrity: sha512-UjeVoRGKNL2zfbcQ6fscmgjBAS/inHBh63mjIlfPg/NG8Yn2ztqylXt5qilYb6hoHIwaU2ogHknHWWmahJjgZQ==} + '@next/swc-win32-x64-msvc@14.2.21': + resolution: {integrity: 
sha512-sT6+llIkzpsexGYZq8cjjthRyRGe5cJVhqh12FmlbxHqna6zsDDK8UNaV7g41T6atFHCJUPeLb3uyAwrBwy0NA==} engines: {node: '>= 10'} cpu: [x64] os: [win32] @@ -1171,8 +1171,8 @@ packages: resolution: {integrity: sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==} engines: {node: '>=10'} - cross-spawn@7.0.3: - resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} engines: {node: '>= 8'} css-in-js-utils@3.1.0: @@ -2079,8 +2079,8 @@ packages: natural-compare@1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} - next@14.2.10: - resolution: {integrity: sha512-sDDExXnh33cY3RkS9JuFEKaS4HmlWmDKP1VJioucCG6z5KuA008DPsDZOzi8UfqEk3Ii+2NCQSJrfbEWtZZfww==} + next@14.2.21: + resolution: {integrity: sha512-rZmLwucLHr3/zfDMYbJXbw0ZeoBpirxkXuvsJbk7UPorvPYZhP7vq7aHbKnU7dQNCYIimRrbB2pp3xmf+wsYUg==} engines: {node: '>=18.17.0'} hasBin: true peerDependencies: @@ -3645,37 +3645,37 @@ snapshots: - bufferutil - utf-8-validate - '@next/env@14.2.10': {} + '@next/env@14.2.21': {} '@next/eslint-plugin-next@14.0.3': dependencies: glob: 7.1.7 - '@next/swc-darwin-arm64@14.2.10': + '@next/swc-darwin-arm64@14.2.21': optional: true - '@next/swc-darwin-x64@14.2.10': + '@next/swc-darwin-x64@14.2.21': optional: true - '@next/swc-linux-arm64-gnu@14.2.10': + '@next/swc-linux-arm64-gnu@14.2.21': optional: true - '@next/swc-linux-arm64-musl@14.2.10': + '@next/swc-linux-arm64-musl@14.2.21': optional: true - '@next/swc-linux-x64-gnu@14.2.10': + '@next/swc-linux-x64-gnu@14.2.21': optional: true - '@next/swc-linux-x64-musl@14.2.10': + '@next/swc-linux-x64-musl@14.2.21': optional: true - '@next/swc-win32-arm64-msvc@14.2.10': + '@next/swc-win32-arm64-msvc@14.2.21': optional: true - 
'@next/swc-win32-ia32-msvc@14.2.10': + '@next/swc-win32-ia32-msvc@14.2.21': optional: true - '@next/swc-win32-x64-msvc@14.2.10': + '@next/swc-win32-x64-msvc@14.2.21': optional: true '@nodelib/fs.scandir@2.1.5': @@ -4232,7 +4232,7 @@ snapshots: path-type: 4.0.0 yaml: 1.10.2 - cross-spawn@7.0.3: + cross-spawn@7.0.6: dependencies: path-key: 3.1.1 shebang-command: 2.0.0 @@ -4407,7 +4407,7 @@ snapshots: env-cmd@10.1.0: dependencies: commander: 4.1.1 - cross-spawn: 7.0.3 + cross-spawn: 7.0.6 error-ex@1.3.2: dependencies: @@ -4588,7 +4588,7 @@ snapshots: debug: 4.3.5 enhanced-resolve: 5.17.0 eslint: 8.57.0 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.0))(eslint@8.57.0))(eslint@8.57.0) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) eslint-plugin-import: 2.29.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) fast-glob: 3.3.2 get-tsconfig: 4.7.5 @@ -4600,7 +4600,7 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-module-utils@2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.0))(eslint@8.57.0))(eslint@8.57.0): + eslint-module-utils@2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0): dependencies: debug: 3.2.7 optionalDependencies: 
@@ -4621,7 +4621,7 @@ snapshots: doctrine: 2.1.0 eslint: 8.57.0 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-plugin-import@2.29.1(eslint@8.57.0))(eslint@8.57.0))(eslint@8.57.0) + eslint-module-utils: 2.8.1(@typescript-eslint/parser@6.21.0(eslint@8.57.0)(typescript@5.5.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1)(eslint@8.57.0) hasown: 2.0.2 is-core-module: 2.14.0 is-glob: 4.0.3 @@ -4703,7 +4703,7 @@ snapshots: '@ungap/structured-clone': 1.2.0 ajv: 6.12.6 chalk: 4.1.2 - cross-spawn: 7.0.3 + cross-spawn: 7.0.6 debug: 4.3.5 doctrine: 3.0.0 escape-string-regexp: 4.0.0 @@ -4754,7 +4754,7 @@ snapshots: execa@5.1.1: dependencies: - cross-spawn: 7.0.3 + cross-spawn: 7.0.6 get-stream: 6.0.1 human-signals: 2.1.0 is-stream: 2.0.1 @@ -4833,7 +4833,7 @@ snapshots: foreground-child@3.2.1: dependencies: - cross-spawn: 7.0.3 + cross-spawn: 7.0.6 signal-exit: 4.1.0 form-data@4.0.0: @@ -5321,9 +5321,9 @@ snapshots: natural-compare@1.4.0: {} - next@14.2.10(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + next@14.2.21(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: - '@next/env': 14.2.10 + '@next/env': 14.2.21 '@swc/helpers': 0.5.5 busboy: 1.6.0 caniuse-lite: 1.0.30001639 @@ -5333,15 +5333,15 @@ snapshots: react-dom: 18.3.1(react@18.3.1) styled-jsx: 5.1.1(react@18.3.1) optionalDependencies: - '@next/swc-darwin-arm64': 14.2.10 - '@next/swc-darwin-x64': 14.2.10 - '@next/swc-linux-arm64-gnu': 14.2.10 - '@next/swc-linux-arm64-musl': 14.2.10 - '@next/swc-linux-x64-gnu': 14.2.10 - '@next/swc-linux-x64-musl': 14.2.10 - '@next/swc-win32-arm64-msvc': 14.2.10 - '@next/swc-win32-ia32-msvc': 14.2.10 - '@next/swc-win32-x64-msvc': 14.2.10 + '@next/swc-darwin-arm64': 14.2.21 + 
'@next/swc-darwin-x64': 14.2.21 + '@next/swc-linux-arm64-gnu': 14.2.21 + '@next/swc-linux-arm64-musl': 14.2.21 + '@next/swc-linux-x64-gnu': 14.2.21 + '@next/swc-linux-x64-musl': 14.2.21 + '@next/swc-win32-arm64-msvc': 14.2.21 + '@next/swc-win32-ia32-msvc': 14.2.21 + '@next/swc-win32-x64-msvc': 14.2.21 transitivePeerDependencies: - '@babel/core' - babel-plugin-macros diff --git a/web/web/src/lib/api/models/index.js b/web/web/src/lib/api/models/index.js index fa968326d1a..74d2e0d368d 100644 --- a/web/web/src/lib/api/models/index.js +++ b/web/web/src/lib/api/models/index.js @@ -45,7 +45,7 @@ const Apis = { LINK_VERSION: ({ metalake, catalog, schema, model }) => `/api/metalakes/${encodeURIComponent(metalake)}/catalogs/${encodeURIComponent( catalog - )}/schemas/${encodeURIComponent(schema)}/models/${encodeURIComponent(model)}`, + )}/schemas/${encodeURIComponent(schema)}/models/${encodeURIComponent(model)}/versions`, DELETE_VERSION: ({ metalake, catalog, schema, model, version }) => { return `/api/metalakes/${encodeURIComponent(metalake)}/catalogs/${encodeURIComponent( catalog